/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
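
// Illustrative note (not part of the validation logic): SPIR-V packs each instruction's
// word count into the high 16 bits of its first word and its opcode into the low 16 bits,
// which is exactly what len() and opcode() above decode. For a hypothetical first word
// 0x0004003B, len() == 4 and opcode() == 0x3B (spv::OpVariable).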

struct shader_module {
    // The spirv image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. this is useful because walking type
    // trees, constant expressions, etc requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {
        build_def_index(this);
    }

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
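
// Usage sketch (hypothetical, for illustration only): the range interface and def_index
// above allow walks like
//
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpVariable) {
//             auto type = module->get_def(insn.word(1));  // jump to the def of the result type id
//         }
//     }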

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler state ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferState(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                            " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = (msgCode == -1) ? "" : validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode,
                                "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                       " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}
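
// Example (hypothetical flag values): with strict == VK_FALSE, a buffer created with
// usage (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) passes a
// check for desired == VK_BUFFER_USAGE_TRANSFER_SRC_BIT because the bits overlap; with
// strict == VK_TRUE every desired bit must be present, so desired ==
// (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT) would fail and
// be reported through log_msg above.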

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
// TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
// If mem is special swapchain key, then verify that image_state valid member is true
// Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
// If mem is special swapchain key, then set entire image_state to valid param value
// Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
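// Illustrative flow (a sketch of how these helpers are used, not an additional check):
// a write-type command such as vkCmdFillBuffer records a deferred action that calls
// SetBufferMemoryValid(..., true) for its destination, while read-type commands call
// ValidateBufferMemoryIsValid and emit the MEMTRACK_INVALID_MEM_REGION warning above
// only if nothing has yet written the bound range.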
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}
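// Note (descriptive, not normative): the bookkeeping above is deliberately bidirectional.
// DEVICE_MEM_INFO::cb_bindings lets a freed allocation find every command buffer that
// recorded a reference to it, while GLOBAL_CB_NODE::memObjs lets a reset command buffer
// remove itself from each allocation's binding list (see clear_cmd_buf_and_mem_references
// below).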

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}
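// Sketch of the resulting link graph (illustrative): binding a VkBufferView in a command
// buffer links cb_node <-> view_state, cb_node <-> the underlying buffer_state, and
// cb_node <-> the DEVICE_MEM_INFO for each bound allocation, so destroying any one of
// those objects can locate and invalidate every dependent command buffer.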

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}
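// Note (descriptive): VK_NULL_HANDLE here means memory was never bound, while the
// MEMORY_UNBOUND sentinel (set when the backing allocation is freed) distinguishes
// "bound, then freed" so the two error messages above can say which case occurred.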

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer",
                                          error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO:: More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}
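// Example of the rule enforced above (hypothetical handles): after
// vkBindBufferMemory(device, buffer, memA, 0) succeeds, a second call
// vkBindBufferMemory(device, buffer, memB, 0) is invalid because non-sparse bindings are
// immutable; SetMemBinding reports it through the MEMTRACK_REBIND_OBJECT path instead of
// silently updating the binding.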

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return true if an error was logged (skip value), false otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

955// SPIRV utility functions
956static void build_def_index(shader_module *module) {
957 for (auto insn : *module) {
958 switch (insn.opcode()) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -0700959 // Types
Tobin Ehlisc96f8062016-03-09 16:12:48 -0700960 case spv::OpTypeVoid:
961 case spv::OpTypeBool:
962 case spv::OpTypeInt:
963 case spv::OpTypeFloat:
964 case spv::OpTypeVector:
965 case spv::OpTypeMatrix:
966 case spv::OpTypeImage:
967 case spv::OpTypeSampler:
968 case spv::OpTypeSampledImage:
969 case spv::OpTypeArray:
970 case spv::OpTypeRuntimeArray:
971 case spv::OpTypeStruct:
972 case spv::OpTypeOpaque:
973 case spv::OpTypePointer:
974 case spv::OpTypeFunction:
975 case spv::OpTypeEvent:
976 case spv::OpTypeDeviceEvent:
977 case spv::OpTypeReserveId:
978 case spv::OpTypeQueue:
979 case spv::OpTypePipe:
980 module->def_index[insn.word(1)] = insn.offset();
981 break;
982
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -0700983 // Fixed constants
Tobin Ehlisc96f8062016-03-09 16:12:48 -0700984 case spv::OpConstantTrue:
985 case spv::OpConstantFalse:
986 case spv::OpConstant:
987 case spv::OpConstantComposite:
988 case spv::OpConstantSampler:
989 case spv::OpConstantNull:
990 module->def_index[insn.word(2)] = insn.offset();
991 break;
992
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -0700993 // Specialization constants
Tobin Ehlisc96f8062016-03-09 16:12:48 -0700994 case spv::OpSpecConstantTrue:
995 case spv::OpSpecConstantFalse:
996 case spv::OpSpecConstant:
997 case spv::OpSpecConstantComposite:
998 case spv::OpSpecConstantOp:
999 module->def_index[insn.word(2)] = insn.offset();
1000 break;
1001
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001002 // Variables
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001003 case spv::OpVariable:
1004 module->def_index[insn.word(2)] = insn.offset();
1005 break;
1006
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001007 // Functions
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001008 case spv::OpFunction:
1009 module->def_index[insn.word(2)] = insn.offset();
1010 break;
1011
1012 default:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001013 // We don't care about any other defs for now.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001014 break;
1015 }
1016 }
1017}
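
// Illustrative sketch (not from the original source): for a hand-written
// disassembly fragment such as
//     %1 = OpTypeFloat 32        ; result id is word(1) for type definitions
//     %2 = OpTypeVector %1 4
//     %3 = OpConstant %1 1.0     ; result id is word(2) -- word(1) is the type
// build_def_index() records def_index[1], def_index[2] and def_index[3] as the
// offsets of these instructions, so a later get_def(id) is a map lookup rather
// than a linear scan over the module.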
1018
1019static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1020 for (auto insn : *src) {
1021 if (insn.opcode() == spv::OpEntryPoint) {
1022 auto entrypointName = (char const *)&insn.word(3);
1023 auto entrypointStageBits = 1u << insn.word(1);
1024
1025 if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1026 return insn;
1027 }
1028 }
1029 }
1030
1031 return src->end();
1032}
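
// Example (illustrative): for the disassembly
//     OpEntryPoint Fragment %main "main" %in_color %out_color
// insn.word(1) is spv::ExecutionModelFragment (4) and the literal name "main"
// starts at word(3). The 1u << word(1) trick works because the SPIR-V
// ExecutionModel values for the graphics and compute stages line up with the
// bit positions of VkShaderStageFlagBits, e.g. 1u << 4 == VK_SHADER_STAGE_FRAGMENT_BIT.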
1033
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001034static char const *storage_class_name(unsigned sc) {
1035 switch (sc) {
1036 case spv::StorageClassInput:
1037 return "input";
1038 case spv::StorageClassOutput:
1039 return "output";
1040 case spv::StorageClassUniformConstant:
1041 return "const uniform";
1042 case spv::StorageClassUniform:
1043 return "uniform";
1044 case spv::StorageClassWorkgroup:
1045 return "workgroup local";
1046 case spv::StorageClassCrossWorkgroup:
1047 return "workgroup global";
1048 case spv::StorageClassPrivate:
1049 return "private global";
1050 case spv::StorageClassFunction:
1051 return "function";
1052 case spv::StorageClassGeneric:
1053 return "generic";
1054 case spv::StorageClassAtomicCounter:
1055 return "atomic counter";
1056 case spv::StorageClassImage:
1057 return "image";
1058 case spv::StorageClassPushConstant:
1059 return "push constant";
1060 default:
1061 return "unknown";
1062 }
1063}
1064
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001065// Get the value of an integral constant
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001066unsigned get_constant_value(shader_module const *src, unsigned id) {
1067 auto value = src->get_def(id);
1068 assert(value != src->end());
1069
1070 if (value.opcode() != spv::OpConstant) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001071 // TODO: Either ensure that the specialization transform is already performed on a module we're
1072 // considering here, OR -- specialize on the fly now.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001073 return 1;
1074 }
1075
1076 return value.word(3);
1077}
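
// Example (illustrative): given
//     %uint   = OpTypeInt 32 0
//     %uint_8 = OpConstant %uint 8
// get_constant_value() on %uint_8's id returns word(3), i.e. 8. A spec
// constant (OpSpecConstant*) fails the OpConstant check above and currently
// yields the placeholder value 1, per the TODO.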
1078
Chris Forbesfa86ce32016-03-18 14:59:39 +13001079
1080static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001081 auto insn = src->get_def(type);
1082 assert(insn != src->end());
1083
1084 switch (insn.opcode()) {
1085 case spv::OpTypeBool:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001086 ss << "bool";
1087 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001088 case spv::OpTypeInt:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001089 ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1090 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001091 case spv::OpTypeFloat:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001092 ss << "float" << insn.word(2);
1093 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001094 case spv::OpTypeVector:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001095 ss << "vec" << insn.word(3) << " of ";
1096 describe_type_inner(ss, src, insn.word(2));
1097 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001098 case spv::OpTypeMatrix:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001099 ss << "mat" << insn.word(3) << " of ";
1100 describe_type_inner(ss, src, insn.word(2));
1101 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001102 case spv::OpTypeArray:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001103 ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1104 describe_type_inner(ss, src, insn.word(2));
1105 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001106 case spv::OpTypePointer:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001107 ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1108 describe_type_inner(ss, src, insn.word(3));
1109 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001110 case spv::OpTypeStruct: {
Chris Forbesfa86ce32016-03-18 14:59:39 +13001111 ss << "struct of (";
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001112 for (unsigned i = 2; i < insn.len(); i++) {
Chris Forbesfa86ce32016-03-18 14:59:39 +13001113 describe_type_inner(ss, src, insn.word(i));
1114 if (i == insn.len() - 1) {
1115 ss << ")";
1116 } else {
1117 ss << ", ";
1118 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001119 }
Chris Forbesfa86ce32016-03-18 14:59:39 +13001120 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001121 }
1122 case spv::OpTypeSampler:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001123 ss << "sampler";
1124 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001125 case spv::OpTypeSampledImage:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001126 ss << "sampler+";
1127 describe_type_inner(ss, src, insn.word(2));
1128 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001129 case spv::OpTypeImage:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001130 ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1131 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001132 default:
Chris Forbesfa86ce32016-03-18 14:59:39 +13001133 ss << "oddtype";
1134 break;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001135 }
1136}
1137
Chris Forbesfa86ce32016-03-18 14:59:39 +13001138
1139static std::string describe_type(shader_module const *src, unsigned type) {
1140 std::ostringstream ss;
1141 describe_type_inner(ss, src, type);
1142 return ss.str();
1143}
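
// Example (illustrative): for a GLSL uniform block declared as
//     layout(set = 0, binding = 0) uniform U { vec4 a; float b; } u;
// describe_type() on the variable's type produces a string along the lines of
//     "ptr to uniform struct of (vec4 of float32, float32)"
// which is how the mismatch messages below render SPIR-V types.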
1144
1145
static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}
1152
1153
1154static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001155 // Walk two type trees together, and complain about differences
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001156 auto a_insn = a->get_def(a_type);
1157 auto b_insn = b->get_def(b_type);
1158 assert(a_insn != a->end());
1159 assert(b_insn != b->end());
1160
Chris Forbes43f01d02016-03-29 16:38:44 +13001161 if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001162 return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
Chris Forbes43f01d02016-03-29 16:38:44 +13001163 }
1164
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001165 if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001166 // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001167 return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1168 }
1169
1170 if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1171 return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001172 }
1173
1174 if (a_insn.opcode() != b_insn.opcode()) {
1175 return false;
1176 }
1177
Chris Forbes43f01d02016-03-29 16:38:44 +13001178 if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. Storage class is expected to differ.
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001180 return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
Chris Forbes43f01d02016-03-29 16:38:44 +13001181 }
1182
1183 if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
Chris Forbes43f01d02016-03-29 16:38:44 +13001185 return false;
1186 }
1187
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001188 switch (a_insn.opcode()) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001189 case spv::OpTypeBool:
Chris Forbes43f01d02016-03-29 16:38:44 +13001190 return true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001191 case spv::OpTypeInt:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001192 // Match on width, signedness
Chris Forbes43f01d02016-03-29 16:38:44 +13001193 return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001194 case spv::OpTypeFloat:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001195 // Match on width
Chris Forbes43f01d02016-03-29 16:38:44 +13001196 return a_insn.word(2) == b_insn.word(2);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001197 case spv::OpTypeVector:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001198 // Match on element type, count.
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001199 if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1200 return false;
1201 if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1202 return a_insn.word(3) >= b_insn.word(3);
1203 }
1204 else {
1205 return a_insn.word(3) == b_insn.word(3);
1206 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001207 case spv::OpTypeMatrix:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001208 // Match on element type, count.
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001209 return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001210 case spv::OpTypeArray:
        // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
        // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001213 return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001214 get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1215 case spv::OpTypeStruct:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001216 // Match on all element types
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001217 {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001218 if (a_insn.len() != b_insn.len()) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001219 return false; // Structs cannot match if member counts differ
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001220 }
1221
1222 for (unsigned i = 2; i < a_insn.len(); i++) {
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001223 if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001224 return false;
1225 }
1226 }
1227
1228 return true;
1229 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001230 default:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001231 // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001232 return false;
1233 }
1234}
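
// Examples (illustrative) of the relaxed matching used for cross-stage checks:
// a producer writing "vec4 of float32" satisfies a consumer reading
// "vec2 of float32" (component counts may shrink: a_insn.word(3) >= b_insn.word(3)),
// and it also satisfies a consumer reading a plain float32, since the vector's
// element type is compared against the narrow numeric scalar. With
// relaxed == false, both cases are rejected as exact mismatches.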
1235
1236static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1237 auto it = map.find(id);
1238 if (it == map.end())
1239 return def;
1240 else
1241 return it->second;
1242}
1243
1244static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1245 auto insn = src->get_def(type);
1246 assert(insn != src->end());
1247
1248 switch (insn.opcode()) {
1249 case spv::OpTypePointer:
        // See through the ptr -- this is only ever at the top level for graphics shaders; we're never actually passing
        // pointers around.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001252 return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1253 case spv::OpTypeArray:
1254 if (strip_array_level) {
1255 return get_locations_consumed_by_type(src, insn.word(2), false);
1256 } else {
1257 return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1258 }
1259 case spv::OpTypeMatrix:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001260 // Num locations is the dimension * element size
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001261 return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
Chris Forbesb91daf02016-04-21 14:46:48 +12001262 case spv::OpTypeVector: {
1263 auto scalar_type = src->get_def(insn.word(2));
1264 auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1265 scalar_type.word(2) : 32;
1266
        // Locations are 128-bit wide; 3- and 4-component vectors of 64-bit types require two.
Chris Forbesb91daf02016-04-21 14:46:48 +12001268 return (bit_width * insn.word(3) + 127) / 128;
1269 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001270 default:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001271 // Everything else is just 1.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001272 return 1;
1273
        // TODO: extend to handle 64-bit scalar types, whose vectors may need multiple locations.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001275 }
1276}
1277
Chris Forbese32b78a2016-04-21 15:00:58 +12001278static unsigned get_locations_consumed_by_format(VkFormat format) {
1279 switch (format) {
1280 case VK_FORMAT_R64G64B64A64_SFLOAT:
1281 case VK_FORMAT_R64G64B64A64_SINT:
1282 case VK_FORMAT_R64G64B64A64_UINT:
1283 case VK_FORMAT_R64G64B64_SFLOAT:
1284 case VK_FORMAT_R64G64B64_SINT:
1285 case VK_FORMAT_R64G64B64_UINT:
1286 return 2;
1287 default:
1288 return 1;
1289 }
1290}
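
// Examples (illustrative) of the location arithmetic above:
//     vec4 of float32 -> (32*4 + 127)/128 = 1 location
//     vec3 of float64 -> (64*3 + 127)/128 = 2 locations
//     mat4 of float32 -> 4 columns * 1 = 4 locations
//     float[5], array level not stripped -> 5 * 1 = 5 locations
// and on the format side, VK_FORMAT_R64G64B64A64_SFLOAT likewise consumes 2,
// so a double-precision vertex attribute and its shader input agree.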
1291
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001292typedef std::pair<unsigned, unsigned> location_t;
1293typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1294
1295struct interface_var {
1296 uint32_t id;
1297 uint32_t type_id;
1298 uint32_t offset;
Chris Forbes804bae32016-03-29 16:14:02 +13001299 bool is_patch;
Chris Forbesa0ab8152016-04-20 13:34:27 +12001300 bool is_block_member;
Chris Forbesa1152762016-11-30 12:40:54 +13001301 bool is_relaxed_precision;
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001302 // TODO: collect the name, too? Isn't required to be present.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001303};
1304
Chris Forbes23a575d2016-03-29 16:41:07 +13001305struct shader_stage_attributes {
1306 char const *const name;
1307 bool arrayed_input;
1308 bool arrayed_output;
1309};
1310
1311static shader_stage_attributes shader_stage_attribs[] = {
1312 {"vertex shader", false, false},
1313 {"tessellation control shader", true, true},
1314 {"tessellation evaluation shader", true, false},
1315 {"geometry shader", true, false},
1316 {"fragment shader", false, false},
1317};
1318
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001319static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1320 while (true) {
1321
1322 if (def.opcode() == spv::OpTypePointer) {
1323 def = src->get_def(def.word(3));
1324 } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1325 def = src->get_def(def.word(2));
1326 is_array_of_verts = false;
1327 } else if (def.opcode() == spv::OpTypeStruct) {
1328 return def;
1329 } else {
1330 return src->end();
1331 }
1332 }
1333}
1334
Chris Forbes1832a772016-05-10 15:30:22 +12001335static void collect_interface_block_members(shader_module const *src,
Chris Forbesd68e3202016-08-23 13:04:34 +12001336 std::map<location_t, interface_var> *out,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001337 std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
Chris Forbes804bae32016-03-29 16:14:02 +13001338 uint32_t id, uint32_t type_id, bool is_patch) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001339 // Walk down the type_id presented, trying to determine whether it's actually an interface block.
Chris Forbes23a575d2016-03-29 16:41:07 +13001340 auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001341 if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001342 // This isn't an interface block.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001343 return;
1344 }
1345
1346 std::unordered_map<unsigned, unsigned> member_components;
Chris Forbesf929b162016-11-30 12:55:40 +13001347 std::unordered_map<unsigned, unsigned> member_relaxed_precision;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001348
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001349 // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001350 for (auto insn : *src) {
1351 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1352 unsigned member_index = insn.word(2);
1353
1354 if (insn.word(3) == spv::DecorationComponent) {
1355 unsigned component = insn.word(4);
1356 member_components[member_index] = component;
1357 }
Chris Forbesf929b162016-11-30 12:55:40 +13001358
1359 if (insn.word(3) == spv::DecorationRelaxedPrecision) {
1360 member_relaxed_precision[member_index] = 1;
1361 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001362 }
1363 }
1364
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001365 // Second pass -- produce the output, from Location decorations
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001366 for (auto insn : *src) {
1367 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1368 unsigned member_index = insn.word(2);
1369 unsigned member_type_id = type.word(2 + member_index);
1370
1371 if (insn.word(3) == spv::DecorationLocation) {
1372 unsigned location = insn.word(4);
1373 unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1374 auto component_it = member_components.find(member_index);
1375 unsigned component = component_it == member_components.end() ? 0 : component_it->second;
Chris Forbesf929b162016-11-30 12:55:40 +13001376 bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001377
1378 for (unsigned int offset = 0; offset < num_locations; offset++) {
Chris Forbesa1152762016-11-30 12:40:54 +13001379 interface_var v = {};
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001380 v.id = id;
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001381 // TODO: member index in interface_var too?
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001382 v.type_id = member_type_id;
1383 v.offset = offset;
Chris Forbes804bae32016-03-29 16:14:02 +13001384 v.is_patch = is_patch;
Chris Forbesa0ab8152016-04-20 13:34:27 +12001385 v.is_block_member = true;
Chris Forbesf929b162016-11-30 12:55:40 +13001386 v.is_relaxed_precision = is_relaxed_precision;
Chris Forbesd68e3202016-08-23 13:04:34 +12001387 (*out)[std::make_pair(location + offset, component)] = v;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001388 }
1389 }
1390 }
1391 }
1392}
1393
Chris Forbesd68e3202016-08-23 13:04:34 +12001394static std::map<location_t, interface_var> collect_interface_by_location(
1395 shader_module const *src, spirv_inst_iter entrypoint,
1396 spv::StorageClass sinterface, bool is_array_of_verts) {
1397
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001398 std::unordered_map<unsigned, unsigned> var_locations;
1399 std::unordered_map<unsigned, unsigned> var_builtins;
1400 std::unordered_map<unsigned, unsigned> var_components;
1401 std::unordered_map<unsigned, unsigned> blocks;
Chris Forbes804bae32016-03-29 16:14:02 +13001402 std::unordered_map<unsigned, unsigned> var_patch;
Chris Forbesa1152762016-11-30 12:40:54 +13001403 std::unordered_map<unsigned, unsigned> var_relaxed_precision;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001404
1405 for (auto insn : *src) {
1406
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001407 // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
1408 // fits neither model.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001409 if (insn.opcode() == spv::OpDecorate) {
1410 if (insn.word(2) == spv::DecorationLocation) {
1411 var_locations[insn.word(1)] = insn.word(3);
1412 }
1413
1414 if (insn.word(2) == spv::DecorationBuiltIn) {
1415 var_builtins[insn.word(1)] = insn.word(3);
1416 }
1417
1418 if (insn.word(2) == spv::DecorationComponent) {
1419 var_components[insn.word(1)] = insn.word(3);
1420 }
1421
1422 if (insn.word(2) == spv::DecorationBlock) {
1423 blocks[insn.word(1)] = 1;
1424 }
Chris Forbes804bae32016-03-29 16:14:02 +13001425
1426 if (insn.word(2) == spv::DecorationPatch) {
1427 var_patch[insn.word(1)] = 1;
1428 }
Chris Forbesa1152762016-11-30 12:40:54 +13001429
1430 if (insn.word(2) == spv::DecorationRelaxedPrecision) {
1431 var_relaxed_precision[insn.word(1)] = 1;
1432 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001433 }
1434 }
1435
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001436 // TODO: handle grouped decorations
1437 // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001438
    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
    // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
Michael Mc Donnell75ecdb72016-04-03 14:47:51 -07001441 uint32_t word = 3;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001442 while (entrypoint.word(word) & 0xff000000u) {
1443 ++word;
1444 }
1445 ++word;
1446
Chris Forbesd68e3202016-08-23 13:04:34 +12001447 std::map<location_t, interface_var> out;
1448
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001449 for (; word < entrypoint.len(); word++) {
1450 auto insn = src->get_def(entrypoint.word(word));
1451 assert(insn != src->end());
1452 assert(insn.opcode() == spv::OpVariable);
1453
Jamie Madill2b6b8d52016-04-04 15:09:51 -04001454 if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001455 unsigned id = insn.word(2);
1456 unsigned type = insn.word(1);
1457
1458 int location = value_or_default(var_locations, id, -1);
1459 int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK; defaults to 0
Chris Forbes804bae32016-03-29 16:14:02 +13001461 bool is_patch = var_patch.find(id) != var_patch.end();
Chris Forbesa1152762016-11-30 12:40:54 +13001462 bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001463
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001464 // All variables and interface block members in the Input or Output storage classes must be decorated with either
1465 // a builtin or an explicit location.
1466 //
1467 // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
1468 // this path for the interface block case, as the individual members of the type are decorated, rather than
1469 // variable declarations.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001470
1471 if (location != -1) {
                // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
                // one result for each.
Chris Forbes43f01d02016-03-29 16:38:44 +13001474 unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001475 for (unsigned int offset = 0; offset < num_locations; offset++) {
Chris Forbesa1152762016-11-30 12:40:54 +13001476 interface_var v = {};
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001477 v.id = id;
1478 v.type_id = type;
1479 v.offset = offset;
Chris Forbes804bae32016-03-29 16:14:02 +13001480 v.is_patch = is_patch;
Chris Forbesa1152762016-11-30 12:40:54 +13001481 v.is_relaxed_precision = is_relaxed_precision;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001482 out[std::make_pair(location + offset, component)] = v;
1483 }
1484 } else if (builtin == -1) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001485 // An interface block instance
Chris Forbesd68e3202016-08-23 13:04:34 +12001486 collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001487 }
1488 }
1489 }
Chris Forbesd68e3202016-08-23 13:04:34 +12001490
1491 return out;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001492}
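
// Sketch of intended use (illustrative only): for a vertex shader declaring
//     layout(location = 0) in vec4 pos;
//     layout(location = 1) in vec3 nrm;
// collect_interface_by_location(src, entrypoint, spv::StorageClassInput, false)
// returns a map keyed by (location, component) with entries at {0,0} and {1,0},
// sorted by location -- which is what lets the stage-linking walk below merge
// producer outputs and consumer inputs in a single pass.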
1493
Chris Forbesd68e3202016-08-23 13:04:34 +12001494static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1495 debug_report_data *report_data, shader_module const *src,
1496 std::unordered_set<uint32_t> const &accessible_ids) {
1497
1498 std::vector<std::pair<uint32_t, interface_var>> out;
Chris Forbes07ac1f32016-08-22 14:58:35 +12001499
1500 for (auto insn : *src) {
1501 if (insn.opcode() == spv::OpDecorate) {
1502 if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1503 auto attachment_index = insn.word(3);
1504 auto id = insn.word(1);
1505
1506 if (accessible_ids.count(id)) {
1507 auto def = src->get_def(id);
1508 assert(def != src->end());
1509
1510 if (def.opcode() == spv::OpVariable && insn.word(3) == spv::StorageClassUniformConstant) {
Chris Forbes56f28452016-08-22 15:31:18 +12001511 auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1512 for (unsigned int offset = 0; offset < num_locations; offset++) {
Chris Forbesa1152762016-11-30 12:40:54 +13001513 interface_var v = {};
Chris Forbes56f28452016-08-22 15:31:18 +12001514 v.id = id;
1515 v.type_id = def.word(1);
1516 v.offset = offset;
Chris Forbes56f28452016-08-22 15:31:18 +12001517 out.emplace_back(attachment_index + offset, v);
1518 }
Chris Forbes07ac1f32016-08-22 14:58:35 +12001519 }
1520 }
1521 }
1522 }
1523 }
Chris Forbesd68e3202016-08-23 13:04:34 +12001524
1525 return out;
Chris Forbes07ac1f32016-08-22 14:58:35 +12001526}
1527
Chris Forbesd68e3202016-08-23 13:04:34 +12001528static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1529 debug_report_data *report_data, shader_module const *src,
1530 std::unordered_set<uint32_t> const &accessible_ids) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001531
1532 std::unordered_map<unsigned, unsigned> var_sets;
1533 std::unordered_map<unsigned, unsigned> var_bindings;
1534
1535 for (auto insn : *src) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001536 // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1537 // DecorationDescriptorSet and DecorationBinding.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001538 if (insn.opcode() == spv::OpDecorate) {
1539 if (insn.word(2) == spv::DecorationDescriptorSet) {
1540 var_sets[insn.word(1)] = insn.word(3);
1541 }
1542
1543 if (insn.word(2) == spv::DecorationBinding) {
1544 var_bindings[insn.word(1)] = insn.word(3);
1545 }
1546 }
1547 }
1548
Chris Forbesd68e3202016-08-23 13:04:34 +12001549 std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1550
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001551 for (auto id : accessible_ids) {
1552 auto insn = src->get_def(id);
1553 assert(insn != src->end());
1554
1555 if (insn.opcode() == spv::OpVariable &&
1556 (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1557 unsigned set = value_or_default(var_sets, insn.word(2), 0);
1558 unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1559
Chris Forbesa1152762016-11-30 12:40:54 +13001560 interface_var v = {};
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001561 v.id = insn.word(2);
1562 v.type_id = insn.word(1);
Chris Forbesbe8986a2016-07-19 15:08:38 +12001563 out.emplace_back(std::make_pair(set, binding), v);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001564 }
1565 }
Chris Forbesd68e3202016-08-23 13:04:34 +12001566
1567 return out;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001568}
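
// Example (illustrative): a shader declaring
//     layout(set = 1, binding = 3) uniform sampler2D tex;
// contributes one entry keyed by descriptor_slot_t{1, 3}, provided its id was
// reached by mark_accessible_ids() below; a missing DescriptorSet or Binding
// decoration defaults to 0 via value_or_default().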
1569
Chris Forbes1832a772016-05-10 15:30:22 +12001570static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
Chris Forbes23a575d2016-03-29 16:41:07 +13001571 spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001572 shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
Chris Forbes23a575d2016-03-29 16:41:07 +13001573 shader_stage_attributes const *consumer_stage) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001574 bool pass = true;
1575
Chris Forbesd68e3202016-08-23 13:04:34 +12001576 auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1577 auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001578
1579 auto a_it = outputs.begin();
1580 auto b_it = inputs.begin();
1581
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001582 // Maps sorted by key (location); walk them together to find mismatches
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001583 while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1584 bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1585 bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1586 auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1587 auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1588
1589 if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
Chris Forbes1832a772016-05-10 15:30:22 +12001590 if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13001591 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
Chris Forbes23a575d2016-03-29 16:41:07 +13001592 "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1593 a_first.second, consumer_stage->name)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001594 pass = false;
1595 }
1596 a_it++;
1597 } else if (a_at_end || a_first > b_first) {
Chris Forbes1832a772016-05-10 15:30:22 +12001598 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001599 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
Chris Forbes23a575d2016-03-29 16:41:07 +13001600 "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1601 producer_stage->name)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001602 pass = false;
1603 }
1604 b_it++;
1605 } else {
Chris Forbesa0ab8152016-04-20 13:34:27 +12001606 // subtleties of arrayed interfaces:
1607 // - if is_patch, then the member is not arrayed, even though the interface may be.
1608 // - if is_block_member, then the extra array level of an arrayed interface is not
1609 // expressed in the member type -- it's expressed in the block type.
Chris Forbes218deeb2016-03-29 16:57:02 +13001610 if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
Chris Forbesa0ab8152016-04-20 13:34:27 +12001611 producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1612 consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
Chris Forbes4ad9cbd2016-04-05 17:51:35 +12001613 true)) {
Chris Forbes1832a772016-05-10 15:30:22 +12001614 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001615 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
Chris Forbesfa86ce32016-03-18 14:59:39 +13001616 a_first.first, a_first.second,
1617 describe_type(producer, a_it->second.type_id).c_str(),
1618 describe_type(consumer, b_it->second.type_id).c_str())) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001619 pass = false;
1620 }
1621 }
Chris Forbes218deeb2016-03-29 16:57:02 +13001622 if (a_it->second.is_patch != b_it->second.is_patch) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001623 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
Chris Forbes218deeb2016-03-29 16:57:02 +13001624 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
Chris Forbesa0193bc2016-04-04 19:19:47 +12001625 "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
Chris Forbes218deeb2016-03-29 16:57:02 +13001626 "per-%s in %s stage", a_first.first, a_first.second,
1627 a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1628 b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1629 pass = false;
1630 }
1631 }
Chris Forbes44208392016-11-30 12:45:00 +13001632 if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001633 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
Chris Forbes44208392016-11-30 12:45:00 +13001634 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1635 "Decoration mismatch on location %u.%u: %s and %s stages differ in precision",
1636 a_first.first, a_first.second,
1637 producer_stage->name,
1638 consumer_stage->name)) {
1639 pass = false;
1640 }
1641 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001642 a_it++;
1643 b_it++;
1644 }
1645 }
1646
1647 return pass;
1648}
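
// Example (illustrative) of the merge walk above: with producer (VS) outputs
// at locations {0, 1} and consumer (FS) inputs at {1, 2}, location 0 raises
// the not-consumed performance warning, location 2 raises the not-produced
// error, and location 1 falls through to the type and decoration checks.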
1649
1650enum FORMAT_TYPE {
1651 FORMAT_TYPE_UNDEFINED,
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001652 FORMAT_TYPE_FLOAT, // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001653 FORMAT_TYPE_SINT,
1654 FORMAT_TYPE_UINT,
1655};
1656
1657static unsigned get_format_type(VkFormat fmt) {
1658 switch (fmt) {
1659 case VK_FORMAT_UNDEFINED:
1660 return FORMAT_TYPE_UNDEFINED;
1661 case VK_FORMAT_R8_SINT:
1662 case VK_FORMAT_R8G8_SINT:
1663 case VK_FORMAT_R8G8B8_SINT:
1664 case VK_FORMAT_R8G8B8A8_SINT:
1665 case VK_FORMAT_R16_SINT:
1666 case VK_FORMAT_R16G16_SINT:
1667 case VK_FORMAT_R16G16B16_SINT:
1668 case VK_FORMAT_R16G16B16A16_SINT:
1669 case VK_FORMAT_R32_SINT:
1670 case VK_FORMAT_R32G32_SINT:
1671 case VK_FORMAT_R32G32B32_SINT:
1672 case VK_FORMAT_R32G32B32A32_SINT:
Chris Forbesf57a5202016-04-20 14:22:07 +12001673 case VK_FORMAT_R64_SINT:
1674 case VK_FORMAT_R64G64_SINT:
1675 case VK_FORMAT_R64G64B64_SINT:
1676 case VK_FORMAT_R64G64B64A64_SINT:
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001677 case VK_FORMAT_B8G8R8_SINT:
1678 case VK_FORMAT_B8G8R8A8_SINT:
Chris Forbese9a21d42016-04-20 14:16:10 +12001679 case VK_FORMAT_A8B8G8R8_SINT_PACK32:
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001680 case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1681 case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1682 return FORMAT_TYPE_SINT;
1683 case VK_FORMAT_R8_UINT:
1684 case VK_FORMAT_R8G8_UINT:
1685 case VK_FORMAT_R8G8B8_UINT:
1686 case VK_FORMAT_R8G8B8A8_UINT:
1687 case VK_FORMAT_R16_UINT:
1688 case VK_FORMAT_R16G16_UINT:
1689 case VK_FORMAT_R16G16B16_UINT:
1690 case VK_FORMAT_R16G16B16A16_UINT:
1691 case VK_FORMAT_R32_UINT:
1692 case VK_FORMAT_R32G32_UINT:
1693 case VK_FORMAT_R32G32B32_UINT:
1694 case VK_FORMAT_R32G32B32A32_UINT:
Chris Forbesf57a5202016-04-20 14:22:07 +12001695 case VK_FORMAT_R64_UINT:
1696 case VK_FORMAT_R64G64_UINT:
1697 case VK_FORMAT_R64G64B64_UINT:
1698 case VK_FORMAT_R64G64B64A64_UINT:
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001699 case VK_FORMAT_B8G8R8_UINT:
1700 case VK_FORMAT_B8G8R8A8_UINT:
Chris Forbese9a21d42016-04-20 14:16:10 +12001701 case VK_FORMAT_A8B8G8R8_UINT_PACK32:
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001702 case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1703 case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1704 return FORMAT_TYPE_UINT;
1705 default:
1706 return FORMAT_TYPE_FLOAT;
1707 }
1708}
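
// Examples (illustrative): VK_FORMAT_R8G8B8A8_UNORM falls through to
// FORMAT_TYPE_FLOAT (normalized and scaled formats read as floats in the
// shader), while VK_FORMAT_R32_UINT maps to FORMAT_TYPE_UINT. The same
// buckets come out of get_fundamental_type() below, so a format and a SPIR-V
// type can be compared directly.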
1709
// Characterizes a SPIR-V type appearing in an interface to a FF stage, for comparison to a VkFormat's characterization above.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001711static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1712 auto insn = src->get_def(type);
1713 assert(insn != src->end());
1714
1715 switch (insn.opcode()) {
1716 case spv::OpTypeInt:
1717 return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1718 case spv::OpTypeFloat:
1719 return FORMAT_TYPE_FLOAT;
1720 case spv::OpTypeVector:
1721 return get_fundamental_type(src, insn.word(2));
1722 case spv::OpTypeMatrix:
1723 return get_fundamental_type(src, insn.word(2));
1724 case spv::OpTypeArray:
1725 return get_fundamental_type(src, insn.word(2));
1726 case spv::OpTypePointer:
1727 return get_fundamental_type(src, insn.word(3));
Chris Forbes383352e2016-08-22 16:36:54 +12001728 case spv::OpTypeImage:
1729 return get_fundamental_type(src, insn.word(2));
1730
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001731 default:
1732 return FORMAT_TYPE_UNDEFINED;
1733 }
1734}
1735
1736static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1737 uint32_t bit_pos = u_ffs(stage);
1738 return bit_pos - 1;
1739}
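
// Example (illustrative, assuming u_ffs() mirrors POSIX ffs()):
// VK_SHADER_STAGE_FRAGMENT_BIT is 0x10, and u_ffs(0x10) == 5, so
// get_shader_stage_id() returns 4 -- a dense index usable with tables such as
// shader_stage_attribs[] above.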
1740
Chris Forbes1832a772016-05-10 15:30:22 +12001741static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001742 // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer. Each binding should
1743 // be specified only once.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001744 std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1745 bool pass = true;
1746
1747 for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1748 auto desc = &vi->pVertexBindingDescriptions[i];
1749 auto &binding = bindings[desc->binding];
1750 if (binding) {
Jeremy Hayese2583052016-12-12 11:01:28 -07001751 // TODO: VALIDATION_ERROR_02105 perhaps?
Chris Forbes1832a772016-05-10 15:30:22 +12001752 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001753 __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1754 "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1755 pass = false;
1756 }
1757 } else {
1758 binding = desc;
1759 }
1760 }
1761
1762 return pass;
1763}
1764
Chris Forbes1832a772016-05-10 15:30:22 +12001765static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001766 shader_module const *vs, spirv_inst_iter entrypoint) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001767 bool pass = true;
1768
Chris Forbesd68e3202016-08-23 13:04:34 +12001769 auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001770
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001771 // Build index by location
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001772 std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1773 if (vi) {
Chris Forbese32b78a2016-04-21 15:00:58 +12001774 for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1775 auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1776 for (auto j = 0u; j < num_locations; j++) {
1777 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1778 }
1779 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001780 }
1781
1782 auto it_a = attribs.begin();
1783 auto it_b = inputs.begin();
Chris Forbes22dd5d22016-07-06 12:18:26 +12001784 bool used = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001785
1786 while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1787 bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1788 bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1789 auto a_first = a_at_end ? 0 : it_a->first;
1790 auto b_first = b_at_end ? 0 : it_b->first.first;
1791 if (!a_at_end && (b_at_end || a_first < b_first)) {
Chris Forbes22dd5d22016-07-06 12:18:26 +12001792 if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13001793 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
Mike Weiblencce7ec72016-10-17 19:33:05 -06001794 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001795 pass = false;
1796 }
Chris Forbes22dd5d22016-07-06 12:18:26 +12001797 used = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001798 it_a++;
1799 } else if (!b_at_end && (a_at_end || b_first < a_first)) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001800 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d, but it is not provided",
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001802 b_first)) {
1803 pass = false;
1804 }
1805 it_b++;
1806 } else {
1807 unsigned attrib_type = get_format_type(it_a->second->format);
1808 unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1809
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001810 // Type checking
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001811 if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
Chris Forbes1832a772016-05-10 15:30:22 +12001812 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001813 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
Mike Weiblen15bd38e2016-10-03 19:19:41 -06001814 "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
Chris Forbesfa86ce32016-03-18 14:59:39 +13001815 string_VkFormat(it_a->second->format), a_first,
1816 describe_type(vs, it_b->second.type_id).c_str())) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001817 pass = false;
1818 }
1819 }
1820
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001821 // OK!
Chris Forbes22dd5d22016-07-06 12:18:26 +12001822 used = true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001823 it_b++;
1824 }
1825 }
1826
1827 return pass;
1828}
1829
Chris Forbes1832a772016-05-10 15:30:22 +12001830static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
Tobin Ehlisc677a092016-06-27 12:57:05 -06001831 spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1832 uint32_t subpass_index) {
Chris Forbes7cf00d42016-04-29 17:33:03 +12001833 std::map<uint32_t, VkFormat> color_attachments;
Tobin Ehlisc677a092016-06-27 12:57:05 -06001834 auto subpass = rpci->pSubpasses[subpass_index];
1835 for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
Tobin Ehlis02656182016-07-01 13:54:41 -06001836 uint32_t attachment = subpass.pColorAttachments[i].attachment;
1837 if (attachment == VK_ATTACHMENT_UNUSED)
1838 continue;
1839 if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1840 color_attachments[i] = rpci->pAttachments[attachment].format;
Chris Forbes7cf00d42016-04-29 17:33:03 +12001841 }
1842 }
1843
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001844 bool pass = true;
1845
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001846 // TODO: dual source blend index (spv::DecIndex, zero if not provided)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001847
Chris Forbesd68e3202016-08-23 13:04:34 +12001848 auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001849
Chris Forbes7cf00d42016-04-29 17:33:03 +12001850 auto it_a = outputs.begin();
1851 auto it_b = color_attachments.begin();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001852
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001853 // Walk attachment list and outputs together
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001854
Chris Forbes7cf00d42016-04-29 17:33:03 +12001855 while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1856 bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1857 bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1858
1859 if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
Chris Forbes1832a772016-05-10 15:30:22 +12001860 if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001861 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
Mike Weiblencce7ec72016-10-17 19:33:05 -06001862 "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001863 pass = false;
1864 }
Chris Forbes7cf00d42016-04-29 17:33:03 +12001865 it_a++;
1866 } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
Chris Forbes1832a772016-05-10 15:30:22 +12001867 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Mike Weiblencce7ec72016-10-17 19:33:05 -06001868 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
1869 it_b->first)) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001870 pass = false;
1871 }
Chris Forbes7cf00d42016-04-29 17:33:03 +12001872 it_b++;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001873 } else {
Chris Forbes7cf00d42016-04-29 17:33:03 +12001874 unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1875 unsigned att_type = get_format_type(it_b->second);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001876
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001877 // Type checking
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001878 if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
Chris Forbes1832a772016-05-10 15:30:22 +12001879 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001880 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
Mike Weiblencce7ec72016-10-17 19:33:05 -06001881 "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
Chris Forbes7cf00d42016-04-29 17:33:03 +12001882 string_VkFormat(it_b->second),
1883 describe_type(fs, it_a->second.type_id).c_str())) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001884 pass = false;
1885 }
1886 }
1887
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001888 // OK!
Chris Forbes7cf00d42016-04-29 17:33:03 +12001889 it_a++;
1890 it_b++;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001891 }
1892 }
1893
1894 return pass;
1895}
1896
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001897// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
1898// important for identifying the set of shader resources actually used by an entrypoint, for example.
1899// Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1900// - NOT the shader input/output interfaces.
1901//
1902// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1903// converting parts of this to be generated from the machine-readable spec instead.
Chris Forbesd68e3202016-08-23 13:04:34 +12001904static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1905 std::unordered_set<uint32_t> ids;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001906 std::unordered_set<uint32_t> worklist;
1907 worklist.insert(entrypoint.word(2));
1908
1909 while (!worklist.empty()) {
1910 auto id_iter = worklist.begin();
1911 auto id = *id_iter;
1912 worklist.erase(id_iter);
1913
1914 auto insn = src->get_def(id);
1915 if (insn == src->end()) {
            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
            // that we may not care about.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001918 continue;
1919 }
1920
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001921 // Try to add to the output set
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001922 if (!ids.insert(id).second) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001923 continue; // If we already saw this id, we don't want to walk it again.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001924 }
1925
1926 switch (insn.opcode()) {
1927 case spv::OpFunction:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001928 // Scan whole body of the function, enlisting anything interesting
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001929 while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1930 switch (insn.opcode()) {
1931 case spv::OpLoad:
1932 case spv::OpAtomicLoad:
1933 case spv::OpAtomicExchange:
1934 case spv::OpAtomicCompareExchange:
1935 case spv::OpAtomicCompareExchangeWeak:
1936 case spv::OpAtomicIIncrement:
1937 case spv::OpAtomicIDecrement:
1938 case spv::OpAtomicIAdd:
1939 case spv::OpAtomicISub:
1940 case spv::OpAtomicSMin:
1941 case spv::OpAtomicUMin:
1942 case spv::OpAtomicSMax:
1943 case spv::OpAtomicUMax:
1944 case spv::OpAtomicAnd:
1945 case spv::OpAtomicOr:
1946 case spv::OpAtomicXor:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001947 worklist.insert(insn.word(3)); // ptr
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001948 break;
1949 case spv::OpStore:
1950 case spv::OpAtomicStore:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001951 worklist.insert(insn.word(1)); // ptr
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001952 break;
1953 case spv::OpAccessChain:
1954 case spv::OpInBoundsAccessChain:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001955 worklist.insert(insn.word(3)); // base ptr
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001956 break;
1957 case spv::OpSampledImage:
1958 case spv::OpImageSampleImplicitLod:
1959 case spv::OpImageSampleExplicitLod:
1960 case spv::OpImageSampleDrefImplicitLod:
1961 case spv::OpImageSampleDrefExplicitLod:
1962 case spv::OpImageSampleProjImplicitLod:
1963 case spv::OpImageSampleProjExplicitLod:
1964 case spv::OpImageSampleProjDrefImplicitLod:
1965 case spv::OpImageSampleProjDrefExplicitLod:
1966 case spv::OpImageFetch:
1967 case spv::OpImageGather:
1968 case spv::OpImageDrefGather:
1969 case spv::OpImageRead:
1970 case spv::OpImage:
1971 case spv::OpImageQueryFormat:
1972 case spv::OpImageQueryOrder:
1973 case spv::OpImageQuerySizeLod:
1974 case spv::OpImageQuerySize:
1975 case spv::OpImageQueryLod:
1976 case spv::OpImageQueryLevels:
1977 case spv::OpImageQuerySamples:
1978 case spv::OpImageSparseSampleImplicitLod:
1979 case spv::OpImageSparseSampleExplicitLod:
1980 case spv::OpImageSparseSampleDrefImplicitLod:
1981 case spv::OpImageSparseSampleDrefExplicitLod:
1982 case spv::OpImageSparseSampleProjImplicitLod:
1983 case spv::OpImageSparseSampleProjExplicitLod:
1984 case spv::OpImageSparseSampleProjDrefImplicitLod:
1985 case spv::OpImageSparseSampleProjDrefExplicitLod:
1986 case spv::OpImageSparseFetch:
1987 case spv::OpImageSparseGather:
1988 case spv::OpImageSparseDrefGather:
1989 case spv::OpImageTexelPointer:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001990 worklist.insert(insn.word(3)); // Image or sampled image
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001991 break;
1992 case spv::OpImageWrite:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001993 worklist.insert(insn.word(1)); // Image -- different operand order to above
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001994 break;
1995 case spv::OpFunctionCall:
Michael Mc Donnell75ecdb72016-04-03 14:47:51 -07001996 for (uint32_t i = 3; i < insn.len(); i++) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07001997 worklist.insert(insn.word(i)); // fn itself, and all args
Tobin Ehlisc96f8062016-03-09 16:12:48 -07001998 }
1999 break;
2000
2001 case spv::OpExtInst:
Michael Mc Donnell75ecdb72016-04-03 14:47:51 -07002002 for (uint32_t i = 5; i < insn.len(); i++) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002003 worklist.insert(insn.word(i)); // Operands to ext inst
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002004 }
2005 break;
2006 }
2007 }
2008 break;
2009 }
2010 }
Chris Forbesd68e3202016-08-23 13:04:34 +12002011
2012 return ids;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002013}
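
// Example (illustrative): if main() calls a helper function that does an
// OpLoad through an OpAccessChain into a uniform block, the worklist walk
// reaches main's OpFunction, the helper via the OpFunctionCall operands, the
// access chain's base pointer, and finally the block's OpVariable -- so the
// descriptor checks treat only variables on such chains as actually used.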
2014
Chris Forbes1832a772016-05-10 15:30:22 +12002015static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
Tobin Ehlis3df41292016-07-07 09:23:38 -06002016 std::vector<VkPushConstantRange> const *push_constant_ranges,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002017 shader_module const *src, spirv_inst_iter type,
2018 VkShaderStageFlagBits stage) {
2019 bool pass = true;
2020
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002021 // Strip off ptrs etc
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002022 type = get_struct_type(src, type, false);
2023 assert(type != src->end());
2024
    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
2026 // TODO: arrays, matrices, weird sizes
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002027 for (auto insn : *src) {
2028 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2029
2030 if (insn.word(3) == spv::DecorationOffset) {
2031 unsigned offset = insn.word(4);
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002032 auto size = 4; // Bytes; TODO: calculate this based on the type
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002033
2034 bool found_range = false;
Tobin Ehlis3df41292016-07-07 09:23:38 -06002035 for (auto const &range : *push_constant_ranges) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002036 if (range.offset <= offset && range.offset + range.size >= offset + size) {
2037 found_range = true;
2038
2039 if ((range.stageFlags & stage) == 0) {
Chris Forbes1832a772016-05-10 15:30:22 +12002040 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13002041 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002042 "Push constant range covering variable starting at "
2043 "offset %u not accessible from stage %s",
2044 offset, string_VkShaderStageFlagBits(stage))) {
2045 pass = false;
2046 }
2047 }
2048
2049 break;
2050 }
2051 }
2052
2053 if (!found_range) {
Chris Forbes1832a772016-05-10 15:30:22 +12002054 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13002055 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002056 "Push constant range covering variable starting at "
2057 "offset %u not declared in layout",
2058 offset)) {
2059 pass = false;
2060 }
2061 }
2062 }
2063 }
2064 }
2065
2066 return pass;
2067}
2068
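// For every id reachable from the entrypoint, find OpVariables in the PushConstant storage
// class and validate the struct type behind each one against the layout's push-constant ranges.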
Chris Forbes1832a772016-05-10 15:30:22 +12002069static bool validate_push_constant_usage(debug_report_data *report_data,
Tobin Ehlis3df41292016-07-07 09:23:38 -06002070 std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002071 std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2072 bool pass = true;
2073
2074 for (auto id : accessible_ids) {
2075 auto def_insn = src->get_def(id);
2076 if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06002077 pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2078 src->get_def(def_insn.word(1)), stage);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002079 }
2080 }
2081
2082 return pass;
2083}
2084
Tobin Ehlis2d9deec2016-04-21 14:19:26 -06002085 // For the given pipelineLayout, verify that the set_layout_node at slot.first
2086 // has the requested binding at slot.second, and return a ptr to that binding
Chris Forbes81d95212016-05-20 18:27:28 +12002087static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002088
2089 if (!pipelineLayout)
Chris Forbes4e4191b2016-03-18 11:14:27 +13002090 return nullptr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002091
Tobin Ehlis3df41292016-07-07 09:23:38 -06002092 if (slot.first >= pipelineLayout->set_layouts.size())
Chris Forbes4e4191b2016-03-18 11:14:27 +13002093 return nullptr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002094
Tobin Ehlis3df41292016-07-07 09:23:38 -06002095 return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002096}
2097
2098// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2099
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002100 // TODO : Should be tracking lastBound per commandBuffer and, when draws occur, reporting based on that cmd buffer's lastBound.
2101 // Accesses then need to be synchronized per cmd buffer so that state being read on one cmd buffer is not
2102 // changed from underneath us by updates made to that same cmd buffer from a separate thread.
2103 // Track the last cmd buffer touched by this thread.
2104
Dustin Graves8f1eab92016-04-05 09:41:17 -06002105static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002106 for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2107 if (pCB->drawCount[i])
Dustin Graves8f1eab92016-04-05 09:41:17 -06002108 return true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002109 }
Dustin Graves8f1eab92016-04-05 09:41:17 -06002110 return false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002111}
2112
2113// Check object status for selected flag state
Dustin Graves8f1eab92016-04-05 09:41:17 -06002114static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
Jeremy Hayese2583052016-12-12 11:01:28 -07002115 const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002116 if (!(pNode->status & status_mask)) {
Jeremy Hayese2583052016-12-12 11:01:28 -07002117 char const *const message = validation_error_map[msg_code];
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002118 return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002119 reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
2120 "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002121 }
Dustin Graves8f1eab92016-04-05 09:41:17 -06002122 return false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002123}
2124
2125// Retrieve pipeline node ptr for given pipeline object
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002126static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
Chris Forbes4e16d882016-05-06 15:54:55 +12002127 auto it = my_data->pipelineMap.find(pipeline);
2128 if (it == my_data->pipelineMap.end()) {
2129 return nullptr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002130 }
Chris Forbes4e16d882016-05-06 15:54:55 +12002131 return it->second;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002132}
2133
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -06002134static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
Chris Forbes967c4682016-05-17 11:36:23 +12002135 auto it = my_data->renderPassMap.find(renderpass);
2136 if (it == my_data->renderPassMap.end()) {
2137 return nullptr;
2138 }
Chris Forbesef730462016-09-27 12:03:31 +13002139 return it->second.get();
Chris Forbes967c4682016-05-17 11:36:23 +12002140}
2141
Tobin Ehlis04c04272016-10-12 11:54:09 -06002142static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
Chris Forbes05e03b72016-05-17 15:27:58 +12002143 auto it = my_data->frameBufferMap.find(framebuffer);
2144 if (it == my_data->frameBufferMap.end()) {
2145 return nullptr;
2146 }
Tobin Ehlis82d2db32016-06-22 08:29:24 -06002147 return it->second.get();
Chris Forbes05e03b72016-05-17 15:27:58 +12002148}
2149
Tobin Ehlis815e8132016-06-02 13:02:17 -06002150cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
Chris Forbes056d9922016-05-20 17:04:07 +12002151 auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2152 if (it == my_data->descriptorSetLayoutMap.end()) {
2153 return nullptr;
2154 }
2155 return it->second;
2156}
2157
Tobin Ehlisc1d9be12016-10-13 10:18:18 -06002158static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
Chris Forbes81d95212016-05-20 18:27:28 +12002159 auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2160 if (it == my_data->pipelineLayoutMap.end()) {
2161 return nullptr;
2162 }
2163 return &it->second;
2164}
2165
Dustin Graves8f1eab92016-04-05 09:41:17 -06002166// Return true if for a given PSO, the given state enum is dynamic, else return false
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002167static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002168 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2169 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2170 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
Dustin Graves8f1eab92016-04-05 09:41:17 -06002171 return true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002172 }
2173 }
Dustin Graves8f1eab92016-04-05 09:41:17 -06002174 return false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002175}
2176
2177// Validate state stored as flags at time of draw call
Jeremy Hayese2583052016-12-12 11:01:28 -07002178static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
2179 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
Chris Forbesb2ba95b2016-09-16 17:11:50 +12002180 bool result = false;
Tobin Ehlis5f4cef12016-04-01 13:51:33 -06002181 if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2182 ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2183 (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002184 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002185 "Dynamic line width state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002186 }
Dustin Gravesbd9c1a92016-04-05 15:15:40 -06002187 if (pPipe->graphicsPipelineCI.pRasterizationState &&
2188 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002189 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002190 "Dynamic depth bias state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002191 }
2192 if (pPipe->blendConstantsEnabled) {
2193 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002194 "Dynamic blend constants state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002195 }
Dustin Gravesbd9c1a92016-04-05 15:15:40 -06002196 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2197 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002198 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002199 "Dynamic depth bounds state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002200 }
Dustin Gravesbd9c1a92016-04-05 15:15:40 -06002201 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2202 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002203 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002204 "Dynamic stencil read mask state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002205 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002206 "Dynamic stencil write mask state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002207 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002208 "Dynamic stencil reference state not set for this command buffer", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002209 }
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07002210 if (indexed) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002211 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Jeremy Hayese2583052016-12-12 11:01:28 -07002212 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06002213 }
Jeremy Hayese2583052016-12-12 11:01:28 -07002214
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002215 return result;
2216}
2217
2218// Verify attachment reference compatibility according to spec
2219 // If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
Mark Lobodzinski4c94c282016-06-20 18:49:25 -06002220// If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002221// to make sure that format and samples counts match.
2222// If not, they are not compatible.
2223static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2224 const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2225 const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2226 const VkAttachmentDescription *pSecondaryAttachments) {
Tobin Ehlisc231aae2016-06-10 02:36:25 -06002227 // Check potential NULL cases first to avoid nullptr issues later
2228 if (pPrimary == nullptr) {
2229 if (pSecondary == nullptr) {
2230 return true;
2231 }
2232 return false;
2233 } else if (pSecondary == nullptr) {
2234 return false;
2235 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002236 if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
Mark Youngeeafb152016-03-24 10:14:35 -06002237 if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2238 return true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002239 } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
Mark Youngeeafb152016-03-24 10:14:35 -06002240 if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2241 return true;
Mark Lobodzinski4c94c282016-06-20 18:49:25 -06002242 } else { // Format and sample count must match
2243 if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2244 return true;
2245 } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2246 return false;
2247 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002248 if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2249 pSecondaryAttachments[pSecondary[index].attachment].format) &&
2250 (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2251 pSecondaryAttachments[pSecondary[index].attachment].samples))
2252 return true;
2253 }
2254 // Format and sample counts didn't match
2255 return false;
2256}
Tobin Ehlis4ca15c72016-06-30 09:29:18 -06002257// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
Tobin Ehlisc677a092016-06-27 12:57:05 -06002258 // For the given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
Tobin Ehlis4ca15c72016-06-30 09:29:18 -06002259static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
Tobin Ehlisc677a092016-06-27 12:57:05 -06002260 const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002261 if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002262 stringstream errorStr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002263 errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2264 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2265 errorMsg = errorStr.str();
2266 return false;
2267 }
2268 uint32_t spIndex = 0;
2269 for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2270 // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2271 uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2272 uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2273 uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2274 for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2275 if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2276 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2277 secondaryColorCount, secondaryRPCI->pAttachments)) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002278 stringstream errorStr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002279 errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2280 errorMsg = errorStr.str();
2281 return false;
2282 } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2283 primaryColorCount, primaryRPCI->pAttachments,
2284 secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2285 secondaryColorCount, secondaryRPCI->pAttachments)) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002286 stringstream errorStr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002287 errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2288 errorMsg = errorStr.str();
2289 return false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002290 }
2291 }
Chris Forbesb442e562016-04-11 18:32:23 +12002292
2293 if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2294 1, primaryRPCI->pAttachments,
2295 secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2296 1, secondaryRPCI->pAttachments)) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002297 stringstream errorStr;
Chris Forbesb442e562016-04-11 18:32:23 +12002298 errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2299 errorMsg = errorStr.str();
2300 return false;
2301 }
2302
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002303 uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2304 uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2305 uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2306 for (uint32_t i = 0; i < inputMax; ++i) {
2307 if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2308 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2309 secondaryColorCount, secondaryRPCI->pAttachments)) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002310 stringstream errorStr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002311 errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2312 errorMsg = errorStr.str();
2313 return false;
2314 }
2315 }
2316 }
2317 return true;
2318}
2319
Tobin Ehlis05be5df2016-05-05 08:25:02 -06002320 // For the given cvdescriptorset::DescriptorSet, verify that it is compatible with the setLayout corresponding to
2321 // pipeline_layout->set_layouts[layoutIndex]
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07002322static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *descriptor_set,
Tobin Ehlis0fc85672016-07-07 11:06:26 -06002323 PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2324 string &errorMsg) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06002325 auto num_sets = pipeline_layout->set_layouts.size();
Tobin Ehlisa382a952016-07-07 09:20:13 -06002326 if (layoutIndex >= num_sets) {
Chris Forbesba2c4662016-04-14 10:30:01 +12002327 stringstream errorStr;
Tobin Ehlis0fc85672016-07-07 11:06:26 -06002328 errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2329 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2330 << layoutIndex;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002331 errorMsg = errorStr.str();
2332 return false;
2333 }
Tobin Ehlis3df41292016-07-07 09:23:38 -06002334 auto layout_node = pipeline_layout->set_layouts[layoutIndex];
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07002335 return descriptor_set->IsCompatible(layout_node, &errorMsg);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002336}
2337
2338// Validate that data for each specialization entry is fully contained within the buffer.
Chris Forbes1832a772016-05-10 15:30:22 +12002339static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002340 bool pass = true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002341
2342 VkSpecializationInfo const *spec = info->pSpecializationInfo;
2343
2344 if (spec) {
2345 for (auto i = 0u; i < spec->mapEntryCount; i++) {
Jeremy Hayese2583052016-12-12 11:01:28 -07002346 // TODO: This is a good place for VALIDATION_ERROR_00589.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002347 if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
Jeremy Hayese2583052016-12-12 11:01:28 -07002348 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
2349 VALIDATION_ERROR_00590, "SC",
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002350 "Specialization entry %u (for constant id %u) references memory outside provided "
2351 "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
Jeremy Hayese2583052016-12-12 11:01:28 -07002352 " bytes provided). %s.",
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002353 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
Jeremy Hayese2583052016-12-12 11:01:28 -07002354 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
2355 validation_error_map[VALIDATION_ERROR_00590])) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002356
Dustin Graves8f1eab92016-04-05 09:41:17 -06002357 pass = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002358 }
2359 }
2360 }
2361 }
2362
2363 return pass;
2364}
2365
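// Determine whether the SPIR-V type backing a resource variable is compatible with the
// VkDescriptorType declared in the set layout. Also reports, via descriptor_count, how many
// descriptors the binding must provide: the product of any array dimensions stripped off below.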
Chris Forbes1832a772016-05-10 15:30:22 +12002366static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
Chris Forbesb9fb5fc2016-03-18 11:21:35 +13002367 VkDescriptorType descriptor_type, unsigned &descriptor_count) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002368 auto type = module->get_def(type_id);
2369
Chris Forbesb9fb5fc2016-03-18 11:21:35 +13002370 descriptor_count = 1;
2371
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002372 // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002373 while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
Chris Forbesa1361092016-03-18 11:26:06 +13002374 if (type.opcode() == spv::OpTypeArray) {
2375 descriptor_count *= get_constant_value(module, type.word(3));
2376 type = module->get_def(type.word(2));
2377 }
2378 else {
2379 type = module->get_def(type.word(3));
2380 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002381 }
2382
2383 switch (type.opcode()) {
2384 case spv::OpTypeStruct: {
2385 for (auto insn : *module) {
2386 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2387 if (insn.word(2) == spv::DecorationBlock) {
2388 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2389 descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2390 } else if (insn.word(2) == spv::DecorationBufferBlock) {
2391 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2392 descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2393 }
2394 }
2395 }
2396
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002397 // Invalid
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002398 return false;
2399 }
2400
2401 case spv::OpTypeSampler:
Chris Forbesb9e3a082016-07-19 14:50:04 +12002402 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2403 descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002404
2405 case spv::OpTypeSampledImage:
Chris Forbesccf300b2016-03-24 14:14:45 +13002406 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002407 // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
2408 // buffer descriptor doesn't really provide one. Allow this slight mismatch.
Chris Forbesccf300b2016-03-24 14:14:45 +13002409 auto image_type = module->get_def(type.word(2));
2410 auto dim = image_type.word(3);
2411 auto sampled = image_type.word(7);
2412 return dim == spv::DimBuffer && sampled == 1;
2413 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002414 return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2415
2416 case spv::OpTypeImage: {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002417 // Many descriptor types can back an image type -- which one depends on the dimension and whether the image will be used with a sampler.
2418 // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002419 auto dim = type.word(3);
2420 auto sampled = type.word(7);
2421
2422 if (dim == spv::DimSubpassData) {
2423 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2424 } else if (dim == spv::DimBuffer) {
2425 if (sampled == 1) {
2426 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2427 } else {
2428 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2429 }
2430 } else if (sampled == 1) {
Chris Forbesb9e3a082016-07-19 14:50:04 +12002431 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2432 descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002433 } else {
2434 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2435 }
2436 }
2437
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002438 // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002439 default:
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002440 return false; // Mismatch
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002441 }
2442}
2443
Chris Forbes34bbe942016-05-10 16:47:02 +12002444static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002445 if (!feature) {
Chris Forbes34bbe942016-05-10 16:47:02 +12002446 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13002447 __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002448 "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2449 "enabled on the device",
2450 feature_name)) {
2451 return false;
2452 }
2453 }
2454
2455 return true;
2456}
2457
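// For each OpCapability the shader declares, require that the corresponding
// VkPhysicalDeviceFeatures member was enabled at device creation. Capabilities that are core in
// Vulkan 1.0 need no feature bit; unknown capabilities are reported as errors.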
Chris Forbesc9b826c2016-05-13 13:17:42 +12002458static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2459 VkPhysicalDeviceFeatures const *enabledFeatures) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002460 bool pass = true;
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002461
2463 for (auto insn : *src) {
2464 if (insn.opcode() == spv::OpCapability) {
2465 switch (insn.word(1)) {
2466 case spv::CapabilityMatrix:
2467 case spv::CapabilityShader:
2468 case spv::CapabilityInputAttachment:
2469 case spv::CapabilitySampled1D:
2470 case spv::CapabilityImage1D:
2471 case spv::CapabilitySampledBuffer:
2472 case spv::CapabilityImageBuffer:
2473 case spv::CapabilityImageQuery:
2474 case spv::CapabilityDerivativeControl:
2475 // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2476 break;
2477
2478 case spv::CapabilityGeometry:
Chris Forbes34bbe942016-05-10 16:47:02 +12002479 pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002480 break;
2481
2482 case spv::CapabilityTessellation:
Chris Forbes34bbe942016-05-10 16:47:02 +12002483 pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002484 break;
2485
2486 case spv::CapabilityFloat64:
Chris Forbes34bbe942016-05-10 16:47:02 +12002487 pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002488 break;
2489
2490 case spv::CapabilityInt64:
Chris Forbes34bbe942016-05-10 16:47:02 +12002491 pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002492 break;
2493
2494 case spv::CapabilityTessellationPointSize:
2495 case spv::CapabilityGeometryPointSize:
Chris Forbes34bbe942016-05-10 16:47:02 +12002496 pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002497 "shaderTessellationAndGeometryPointSize");
2498 break;
2499
2500 case spv::CapabilityImageGatherExtended:
Chris Forbes34bbe942016-05-10 16:47:02 +12002501 pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002502 break;
2503
2504 case spv::CapabilityStorageImageMultisample:
Chris Forbes34bbe942016-05-10 16:47:02 +12002505 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002506 break;
2507
2508 case spv::CapabilityUniformBufferArrayDynamicIndexing:
Chris Forbes34bbe942016-05-10 16:47:02 +12002509 pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002510 "shaderUniformBufferArrayDynamicIndexing");
2511 break;
2512
2513 case spv::CapabilitySampledImageArrayDynamicIndexing:
Chris Forbes34bbe942016-05-10 16:47:02 +12002514 pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002515 "shaderSampledImageArrayDynamicIndexing");
2516 break;
2517
2518 case spv::CapabilityStorageBufferArrayDynamicIndexing:
Chris Forbes34bbe942016-05-10 16:47:02 +12002519 pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002520 "shaderStorageBufferArrayDynamicIndexing");
2521 break;
2522
2523 case spv::CapabilityStorageImageArrayDynamicIndexing:
Chris Forbes34bbe942016-05-10 16:47:02 +12002524 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002525 "shaderStorageImageArrayDynamicIndexing");
2526 break;
2527
2528 case spv::CapabilityClipDistance:
Chris Forbes34bbe942016-05-10 16:47:02 +12002529 pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002530 break;
2531
2532 case spv::CapabilityCullDistance:
Chris Forbes34bbe942016-05-10 16:47:02 +12002533 pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002534 break;
2535
2536 case spv::CapabilityImageCubeArray:
Chris Forbes34bbe942016-05-10 16:47:02 +12002537 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002538 break;
2539
2540 case spv::CapabilitySampleRateShading:
Chris Forbes34bbe942016-05-10 16:47:02 +12002541 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002542 break;
2543
2544 case spv::CapabilitySparseResidency:
Chris Forbes34bbe942016-05-10 16:47:02 +12002545 pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002546 break;
2547
2548 case spv::CapabilityMinLod:
Chris Forbes34bbe942016-05-10 16:47:02 +12002549 pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002550 break;
2551
2552 case spv::CapabilitySampledCubeArray:
Chris Forbes34bbe942016-05-10 16:47:02 +12002553 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002554 break;
2555
2556 case spv::CapabilityImageMSArray:
Chris Forbes34bbe942016-05-10 16:47:02 +12002557 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002558 break;
2559
2560 case spv::CapabilityStorageImageExtendedFormats:
Chris Forbes34bbe942016-05-10 16:47:02 +12002561 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002562 "shaderStorageImageExtendedFormats");
2563 break;
2564
2565 case spv::CapabilityInterpolationFunction:
Chris Forbes34bbe942016-05-10 16:47:02 +12002566 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002567 break;
2568
2569 case spv::CapabilityStorageImageReadWithoutFormat:
Chris Forbes34bbe942016-05-10 16:47:02 +12002570 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002571 "shaderStorageImageReadWithoutFormat");
2572 break;
2573
2574 case spv::CapabilityStorageImageWriteWithoutFormat:
Chris Forbes34bbe942016-05-10 16:47:02 +12002575 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002576 "shaderStorageImageWriteWithoutFormat");
2577 break;
2578
2579 case spv::CapabilityMultiViewport:
Chris Forbes34bbe942016-05-10 16:47:02 +12002580 pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002581 break;
2582
2583 default:
Chris Forbes34bbe942016-05-10 16:47:02 +12002584 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002585 __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2586 "Shader declares capability %u, not supported in Vulkan.",
2587 insn.word(1)))
Dustin Graves8f1eab92016-04-05 09:41:17 -06002588 pass = false;
Chris Forbesc3a1dbc2016-03-15 10:12:48 +13002589 break;
2590 }
2591 }
2592 }
2593
2594 return pass;
2595}
2596
Chris Forbesc7090a82016-07-25 18:10:41 +12002597
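// Map the SPIR-V type of a resource variable onto descriptor_req flags: peel off arrays,
// pointers, and sampled-image wrappers to reach the underlying OpTypeImage, then translate its
// Dim, Arrayed, and MS operands into view-type and sample-count requirement bits. For example,
// an arrayed multisampled Dim2D image yields DESCRIPTOR_REQ_MULTI_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY.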
2598static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
Chris Forbes0b389472016-07-25 18:12:05 +12002599 auto type = module->get_def(type_id);
2600
2601 while (true) {
2602 switch (type.opcode()) {
2603 case spv::OpTypeArray:
2604 case spv::OpTypeSampledImage:
2605 type = module->get_def(type.word(2));
2606 break;
2607 case spv::OpTypePointer:
2608 type = module->get_def(type.word(3));
2609 break;
2610 case spv::OpTypeImage: {
2611 auto dim = type.word(3);
2612 auto arrayed = type.word(5);
2613 auto msaa = type.word(6);
2614
2615 switch (dim) {
2616 case spv::Dim1D:
Chris Forbes0d250c22016-08-02 08:08:16 +12002617 return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
Chris Forbes0b389472016-07-25 18:12:05 +12002618 case spv::Dim2D:
2619 return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
Chris Forbes0d250c22016-08-02 08:08:16 +12002620 (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
Chris Forbes0b389472016-07-25 18:12:05 +12002621 case spv::Dim3D:
2622 return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2623 case spv::DimCube:
Chris Forbes0d250c22016-08-02 08:08:16 +12002624 return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
Chris Forbes48a1b2e2016-08-31 11:59:48 -07002625 case spv::DimSubpassData:
2626 return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2627 default: // buffer, etc.
Chris Forbes0b389472016-07-25 18:12:05 +12002628 return 0;
2629 }
2630 }
2631 default:
2632 return 0;
2633 }
2634 }
Chris Forbesc7090a82016-07-25 18:10:41 +12002635}
2636
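// Validate a single shader stage of a pipeline: resolve the module and entrypoint, check the
// declared capabilities against enabled device features, validate specialization offsets and
// push-constant usage, check each descriptor use against the pipeline layout (recording used
// slots into pipeline->active_slots along the way), and, for fragment shaders, check
// input-attachment usage against the subpass description.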
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002637static bool
2638validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
2639 PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
2640 VkPhysicalDeviceFeatures const *enabledFeatures,
2641 std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002642 bool pass = true;
Chris Forbesc9b826c2016-05-13 13:17:42 +12002643 auto module_it = shaderModuleMap.find(pStage->module);
2644 auto module = *out_module = module_it->second.get();
Chris Forbes961cee72016-03-30 12:12:01 +13002645
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002646 // Find the entrypoint
Chris Forbes961cee72016-03-30 12:12:01 +13002647 auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2648 if (entrypoint == module->end()) {
Jeremy Hayese2583052016-12-12 11:01:28 -07002649 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
2650 "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
2651 string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
Chris Forbes1a89b3f2016-09-09 11:23:39 +12002652 return false; // no point continuing beyond here, any analysis is just going to be garbage.
Chris Forbes961cee72016-03-30 12:12:01 +13002653 }
2654 }
2655
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002656 // Validate shader capabilities against enabled device features
Chris Forbes19b60582016-05-13 13:27:58 +12002657 pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
Chris Forbes961cee72016-03-30 12:12:01 +13002658
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002659 // Mark accessible ids
Chris Forbesd68e3202016-08-23 13:04:34 +12002660 auto accessible_ids = mark_accessible_ids(module, entrypoint);
Chris Forbes961cee72016-03-30 12:12:01 +13002661
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002662 // Validate descriptor set layout against what the entrypoint actually uses
Chris Forbesd68e3202016-08-23 13:04:34 +12002663 auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
Chris Forbes961cee72016-03-30 12:12:01 +13002664
Tobin Ehlis97f401a2016-07-07 08:12:03 -06002665 auto pipelineLayout = pipeline->pipeline_layout;
Chris Forbes337dbbb2016-05-13 13:39:17 +12002666
Chris Forbes1a89b3f2016-09-09 11:23:39 +12002667 pass &= validate_specialization_offsets(report_data, pStage);
Tobin Ehlis3df41292016-07-07 09:23:38 -06002668 pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
Chris Forbes961cee72016-03-30 12:12:01 +13002669
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002670 // Validate descriptor use
Chris Forbes961cee72016-03-30 12:12:01 +13002671 for (auto use : descriptor_uses) {
2672 // While validating shaders, capture which slots are used by the pipeline
Chris Forbesc7090a82016-07-25 18:10:41 +12002673 auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2674 reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
Chris Forbes961cee72016-03-30 12:12:01 +13002675
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002676 // Verify given pipelineLayout has requested setLayout with requested binding
Tobin Ehlisb73587d2016-07-07 08:14:48 -06002677 const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
Chris Forbes961cee72016-03-30 12:12:01 +13002678 unsigned required_descriptor_count;
2679
2680 if (!binding) {
Chris Forbes19b60582016-05-13 13:27:58 +12002681 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
Chris Forbesce5be902016-03-30 13:14:22 +13002682 __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
Chris Forbes961cee72016-03-30 12:12:01 +13002683 "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2684 use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002685 pass = false;
Chris Forbes961cee72016-03-30 12:12:01 +13002686 }
2687 } else if (~binding->stageFlags & pStage->stage) {
Chris Forbes19b60582016-05-13 13:27:58 +12002688 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002689 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
Chris Forbes961cee72016-03-30 12:12:01 +13002690 "Shader uses descriptor slot %u.%u (used "
2691 "as type `%s`) but descriptor not "
2692 "accessible from stage %s",
Tobin Ehlis2d9deec2016-04-21 14:19:26 -06002693 use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
Chris Forbes961cee72016-03-30 12:12:01 +13002694 string_VkShaderStageFlagBits(pStage->stage))) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002695 pass = false;
Chris Forbes961cee72016-03-30 12:12:01 +13002696 }
Chris Forbes1832a772016-05-10 15:30:22 +12002697 } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002698 required_descriptor_count)) {
Chris Forbes19b60582016-05-13 13:27:58 +12002699 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
Tobin Ehlis2d9deec2016-04-21 14:19:26 -06002700 SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2701 "%u.%u (used as type `%s`) but "
2702 "descriptor of type %s",
2703 use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
Chris Forbes961cee72016-03-30 12:12:01 +13002704 string_VkDescriptorType(binding->descriptorType))) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002705 pass = false;
Chris Forbes961cee72016-03-30 12:12:01 +13002706 }
2707 } else if (binding->descriptorCount < required_descriptor_count) {
Chris Forbes19b60582016-05-13 13:27:58 +12002708 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
Tobin Ehlis2d9deec2016-04-21 14:19:26 -06002709 SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
Chris Forbes961cee72016-03-30 12:12:01 +13002710 "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2711 required_descriptor_count, use.first.first, use.first.second,
Tobin Ehlis2d9deec2016-04-21 14:19:26 -06002712 describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06002713 pass = false;
Chris Forbes961cee72016-03-30 12:12:01 +13002714 }
2715 }
2716 }
2717
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07002718 // Validate use of input attachments against subpass structure
Chris Forbes8420fea2016-08-22 15:20:11 +12002719 if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
Chris Forbesd68e3202016-08-23 13:04:34 +12002720 auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
Chris Forbes8420fea2016-08-22 15:20:11 +12002721
2722 auto rpci = pipeline->render_pass_ci.ptr();
2723 auto subpass = pipeline->graphicsPipelineCI.subpass;
2724
2725 for (auto use : input_attachment_uses) {
2726 auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2727 auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2728 input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2729
2730 if (index == VK_ATTACHMENT_UNUSED) {
2731 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2732 SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2733 "Shader consumes input attachment index %d but not provided in subpass",
2734 use.first)) {
2735 pass = false;
2736 }
2737 }
Chris Forbes74e7bf52016-08-22 16:11:22 +12002738 else if (get_format_type(rpci->pAttachments[index].format) !=
2739 get_fundamental_type(module, use.second.type_id)) {
2740 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2741 SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2742 "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2743 use.first, string_VkFormat(rpci->pAttachments[index].format),
2744 describe_type(module, use.second.type_id).c_str())) {
2745 pass = false;
2746 }
2747 }
Chris Forbes8420fea2016-08-22 15:20:11 +12002748 }
2749 }
2750
Chris Forbes961cee72016-03-30 12:12:01 +13002751 return pass;
2752}
2753
2754
Tobin Ehlis81e8ca42016-03-24 09:17:25 -06002755 // Validate the shaders used by the given pipeline, and store the active_slots
2756// that are actually used by the pipeline into pPipeline->active_slots
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002757static bool
2758validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2759 VkPhysicalDeviceFeatures const *enabledFeatures,
2760 std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
Chris Forbes6f6844a2016-04-27 14:00:44 +12002761 auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002762 int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2763 int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2764
2765 shader_module *shaders[5];
2766 memset(shaders, 0, sizeof(shaders));
2767 spirv_inst_iter entrypoints[5];
2768 memset(entrypoints, 0, sizeof(entrypoints));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002769 VkPipelineVertexInputStateCreateInfo const *vi = 0;
Dustin Graves8f1eab92016-04-05 09:41:17 -06002770 bool pass = true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002771
2772 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
Chris Forbes6f6844a2016-04-27 14:00:44 +12002773 auto pStage = &pCreateInfo->pStages[i];
Chris Forbes961cee72016-03-30 12:12:01 +13002774 auto stage_id = get_shader_stage_id(pStage->stage);
Chris Forbes399c7342016-05-13 13:42:12 +12002775 pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
Chris Forbesc9b826c2016-05-13 13:17:42 +12002776 &shaders[stage_id], &entrypoints[stage_id],
2777 enabledFeatures, shaderModuleMap);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002778 }
2779
Chris Forbese7210092016-09-13 11:22:22 +12002780 // If the shader stages are no good individually, cross-stage validation is pointless.
Chris Forbesddb7d712016-09-09 11:26:20 +12002781 if (!pass)
2782 return false;
2783
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002784 vi = pCreateInfo->pVertexInputState;
2785
2786 if (vi) {
Chris Forbes399c7342016-05-13 13:42:12 +12002787 pass &= validate_vi_consistency(report_data, vi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002788 }
2789
2790 if (shaders[vertex_stage]) {
Chris Forbes399c7342016-05-13 13:42:12 +12002791 pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002792 }
2793
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002794 int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2795 int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2796
2797 while (!shaders[producer] && producer != fragment_stage) {
2798 producer++;
2799 consumer++;
2800 }
2801
2802 for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2803 assert(shaders[producer]);
2804 if (shaders[consumer]) {
Chris Forbes399c7342016-05-13 13:42:12 +12002805 pass &= validate_interface_between_stages(report_data,
Chris Forbes23a575d2016-03-29 16:41:07 +13002806 shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2807 shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002808
2809 producer = consumer;
2810 }
2811 }
2812
Tobin Ehlisc677a092016-06-27 12:57:05 -06002813 if (shaders[fragment_stage]) {
Chris Forbes399c7342016-05-13 13:42:12 +12002814 pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
Tobin Ehlisc677a092016-06-27 12:57:05 -06002815 pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002816 }
2817
2818 return pass;
2819}
2820
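// A compute pipeline has exactly one stage, so validation reduces to the single-stage check.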
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002821static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2822 VkPhysicalDeviceFeatures const *enabledFeatures,
2823 std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
Chris Forbes6f6844a2016-04-27 14:00:44 +12002824 auto pCreateInfo = pPipeline->computePipelineCI.ptr();
Chris Forbesb029e6f2016-03-30 14:04:36 +13002825
Chris Forbesb029e6f2016-03-30 14:04:36 +13002826 shader_module *module;
2827 spirv_inst_iter entrypoint;
2828
Chris Forbes399c7342016-05-13 13:42:12 +12002829 return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
Chris Forbesc9b826c2016-05-13 13:17:42 +12002830 &module, &entrypoint, enabledFeatures, shaderModuleMap);
Chris Forbesb029e6f2016-03-30 14:04:36 +13002831}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002832// Return Set node ptr for specified set or else NULL
Tobin Ehlise83a46a2016-06-02 12:48:25 -06002833cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
Tobin Ehlis6a72dc72016-06-01 16:41:17 -06002834 auto set_it = my_data->setMap.find(set);
2835 if (set_it == my_data->setMap.end()) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002836 return NULL;
2837 }
Tobin Ehlis6a72dc72016-06-01 16:41:17 -06002838 return set_it->second;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002839}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07002840
Mark Young29927482016-05-04 14:38:51 -06002841// For given pipeline, return number of MSAA samples, or one if MSAA disabled
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002842static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
Chris Forbes4e16d882016-05-06 15:54:55 +12002843 if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2844 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
Mark Young29927482016-05-04 14:38:51 -06002845 return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2846 }
2847 return VK_SAMPLE_COUNT_1_BIT;
2848}
2849
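// Stream the indices of all set bits in 'bits' as a comma-separated list, e.g. 0b1011 -> "0,1,3".
// Used below to report which dynamic viewports/scissors were never set on the command buffer.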
Chris Forbes5fc77832016-07-28 14:15:38 +12002850static void list_bits(std::ostream& s, uint32_t bits) {
2851 for (int i = 0; i < 32 && bits; i++) {
2852 if (bits & (1 << i)) {
2853 s << i;
2854 bits &= ~(1 << i);
2855 if (bits) {
2856 s << ",";
2857 }
2858 }
2859 }
2860}
2861
Mark Young29927482016-05-04 14:38:51 -06002862// Validate draw-time state related to the PSO
Tobin Ehlis288cb7e2016-12-21 08:30:22 -07002863static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002864 PIPELINE_STATE const *pPipeline) {
Mark Young29927482016-05-04 14:38:51 -06002865 bool skip_call = false;
Mark Young29927482016-05-04 14:38:51 -06002866
Mike Weiblencce7ec72016-10-17 19:33:05 -06002867 // Verify vertex binding
Chris Forbesdbc66322016-05-31 16:33:48 +12002868 if (pPipeline->vertexBindingDescriptions.size() > 0) {
2869 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
Tobin Ehlis9b9fdd32016-08-03 09:59:17 -06002870 auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2871 if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2872 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2873 skip_call |= log_msg(
2874 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2875 DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2876 "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
2877 "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
2878 "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002879 (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
Chris Forbesdbc66322016-05-31 16:33:48 +12002880 }
2881 }
2882 } else {
Tobin Ehlis232017e2016-12-21 10:28:54 -07002883 if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
Chris Forbesdbc66322016-05-31 16:33:48 +12002884 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
Tobin Ehliseb00b0d2016-08-17 07:55:55 -06002885 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07002886 "Vertex buffers are bound to command buffer (0x%p"
Tobin Ehliseb00b0d2016-08-17 07:55:55 -06002887 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07002888 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
Chris Forbesdbc66322016-05-31 16:33:48 +12002889 }
2890 }
2891 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2892 // Skip check if rasterization is disabled or there is no viewport.
2893 if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2894 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2895 pPipeline->graphicsPipelineCI.pViewportState) {
2896 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2897 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
Chris Forbes5fc77832016-07-28 14:15:38 +12002898
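        // pCB->viewportMask and pCB->scissorMask record which slots have been set dynamically on
        // this command buffer; (1 << count) - 1 is the mask of slots the PSO requires, so any
        // required bit missing from the command buffer's mask identifies an unset slot by index.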
Chris Forbesdbc66322016-05-31 16:33:48 +12002899 if (dynViewport) {
Chris Forbes5fc77832016-07-28 14:15:38 +12002900 auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2901 auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2902 if (missingViewportMask) {
2903 std::stringstream ss;
2904 ss << "Dynamic viewport(s) ";
2905 list_bits(ss, missingViewportMask);
Mike Weiblencce7ec72016-10-17 19:33:05 -06002906 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
Chris Forbes5fc77832016-07-28 14:15:38 +12002907 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2908 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2909 "%s", ss.str().c_str());
Chris Forbesdbc66322016-05-31 16:33:48 +12002910 }
2911 }
Chris Forbes5fc77832016-07-28 14:15:38 +12002912
Chris Forbesdbc66322016-05-31 16:33:48 +12002913 if (dynScissor) {
Chris Forbes5fc77832016-07-28 14:15:38 +12002914 auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
2915 auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
2916 if (missingScissorMask) {
2917 std::stringstream ss;
2918 ss << "Dynamic scissor(s) ";
2919 list_bits(ss, missingScissorMask);
Mike Weiblencce7ec72016-10-17 19:33:05 -06002920 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
Chris Forbes5fc77832016-07-28 14:15:38 +12002921 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2922 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2923 "%s", ss.str().c_str());
Chris Forbesdbc66322016-05-31 16:33:48 +12002924 }
2925 }
2926 }

    // Verify that any MSAA request in PSO matches the sample count in the bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Render pass subpass %u: blend state attachment count %u does not match subpass color "
                            "attachment count %u in Pipeline (0x%" PRIxLEAST64 "). These must be the same at draw-time.",
                            pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
            }

            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "Sample count mismatch at draw-time: Pipeline (0x%" PRIxLEAST64
                            ") was created with %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                            reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__,
                                 DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time while using Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
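    // Illustrative note (not executed): subpass_num_samples above ORs together the
    // VkSampleCountFlagBits of every used attachment. For a subpass whose color and depth
    // attachments were all created with VK_SAMPLE_COUNT_4_BIT, the accumulated value is 4,
    // so only a PSO whose multisample state requests 4 samples passes the check; a subpass
    // mixing sample counts produces a value with more than one bit set and can never match
    // a single PSO sample count.
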
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible with the gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created with render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}

// Validate overall state at the time of a draw call
static bool ValidateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
                              const VkPipelineBindPoint bind_point, const char *function,
                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return: every check below dereferences the pipeline, so there is nothing more to validate
        return result;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexed, msg_code);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                  (uint64_t)pPipe->pipeline, setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex,
                            reinterpret_cast<uint64_t &>(pipeline_layout.layout), errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> active_bindings;
                for (auto binding : set_binding_pair.second) {
                    active_bindings.insert(binding.first);
                }
                // Make sure set has been updated if it has no immutable samplers
                // If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!descriptor_set->IsUpdated()) {
                    for (auto binding : active_bindings) {
                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)descriptor_set->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)descriptor_set->GetSet());
                        }
                    }
                }
                // Validate the draw-time state for this descriptor set
                std::string err_str;
                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                                reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}

static void UpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Bind this set and its active descriptor resources to the command buffer
            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
            // For given active slots record updated images & buffers
            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
        }
    }
    if (pPipe->vertexBindingDescriptions.size() > 0) {
        cb_state->vertex_buffer_used = true;
    }
}
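// Illustrative sketch (assumption about intended usage, not the actual call site):
// ValidateDrawState() and UpdateDrawState() form a validate-then-commit pair, so a draw
// entry point would do something along the lines of
//     bool skip = ValidateDrawState(dev_data, cb_node, false /*indexed*/,
//                                   VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw()",
//                                   msg_code /* a UNIQUE_VALIDATION_ERROR_CODE */);
//     if (!skip) UpdateDrawState(dev_data, cb_node, VK_PIPELINE_BIND_POINT_GRAPHICS);
// so that state bookkeeping only happens for draws that are not skipped.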

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}
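// Illustrative note (not executed): with the wideLines feature disabled, only
// lineWidth == 1.0f is accepted regardless of the reported limits. With wideLines
// enabled and, say, limits.lineWidthRange == [0.5, 10.0], verifyLineWidth() accepts
// 0.5f and 10.0f but flags 0.25f or 16.0f against dsError.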

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
    bool skip_call = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00518, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
                            validation_error_map[VALIDATION_ERROR_00518]);
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip_call |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    VALIDATION_ERROR_01532, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                                                  "enabled, all elements of pAttachments must be identical. %s",
                                    validation_error_map[VALIDATION_ERROR_01532]);
                        break;
                    }
                }
            }
        }
        if (!my_data->enabled_features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_01533, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
                        validation_error_map[VALIDATION_ERROR_01533]);
        }
    }
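    // Illustrative note (not executed): because the comparison above is a raw memcmp,
    // even a single differing field is an error when independentBlend is disabled; e.g.
    // two attachment states identical except for colorWriteMask 0xF vs 0x7 compare
    // unequal and trigger VALIDATION_ERROR_01532, while bitwise-identical states pass.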

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02122, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                           "is out of range for this renderpass (0..%u). %s",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
                             validation_error_map[VALIDATION_ERROR_02122]);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
                             validation_error_map[VALIDATION_ERROR_00532]);
    }
    // Either both or neither TC/TE shaders should be defined
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00534, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00534]);
    }
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00535, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00535]);
    }
    // Compute shaders should be specified independently of graphics shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00533, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
                             validation_error_map[VALIDATION_ERROR_00533]);
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02099, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                           "topology for tessellation pipelines. %s",
                             validation_error_map[VALIDATION_ERROR_02099]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02100, "DS", "Invalid Pipeline CreateInfo State: "
                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                               "topology is only valid for tessellation pipelines. %s",
                                 validation_error_map[VALIDATION_ERROR_02100]);
        }
    }

    if (pPipeline->graphicsPipelineCI.pTessellationState &&
        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
          my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_01426, "DS", "Invalid Pipeline CreateInfo State: "
                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                           "topology used with patchControlPoints value %u."
                                                           " patchControlPoints should be >0 and <=%u. %s",
                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                             my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
                             validation_error_map[VALIDATION_ERROR_01426]);
    }
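    // Illustrative note (not executed): a tessellation pipeline that passes the checks
    // above supplies both a TC and a TE stage, sets
    // pInputAssemblyState->topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, and provides a
    // pTessellationState with 0 < patchControlPoints <= limits.maxTessellationPatchSize
    // (e.g. patchControlPoints = 3 for triangle patches).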

    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }

    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
    // valid structure
    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment. %s",
                                     validation_error_map[VALIDATION_ERROR_02115]);
            }
        }
    }
    return skip_call;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if update struct is of valid type, otherwise flag error and return code from callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
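// Illustrative note (not executed): for a binding whose first overall layout index is
// binding_start_index = 4, a write update with arrayIndex = 1 and descriptorCount = 3
// gives getUpdateStartIndex() == 5 and getUpdateEndIndex() == 5 + 3 - 1 == 7, i.e. the
// update touches overall indices [5, 7] of the layout.
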
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // No need to validate
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node,
                const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64
                " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout),
                string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
                const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// Find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// Find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto image_state = getImageState(my_data, image);
    if (!image_state)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}
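// Illustrative sketch (assumption, not library code): a typical caller collects every
// tracked layout for an image and scans the result, e.g.
//     std::vector<VkImageLayout> layouts;
//     if (FindLayouts(dev_data, image, layouts)) {
//         for (auto layout : layouts) {
//             // compare each recorded layout against the layout a command expects
//         }
//     }
// The per-aspect FindLayout() overloads above fall back to the whole-image entry when
// no per-subresource entry exists.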

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

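// Whole-image variant: records a layout for an image without naming a specific
// subresource; this creates the hasSubresource == false entry that the FindLayout()
// overloads above consult as a fallback.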
template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}
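// Illustrative note (not executed): for a view created on a VK_FORMAT_D24_UNORM_S8_UINT
// image with subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT, the loop above
// widens sub.aspectMask to DEPTH | STENCIL before recording, so both aspects of every
// (mip level, array layer) pair in the range end up tracked at the new layout.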

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set)
        return false;
    bool skip_call = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.",
                             func_str.c_str(), (uint64_t)(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip_call;
}

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%p that doesn't exist!", cb);
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}
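// Illustrative note (not executed): a subpass begun with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS admits only vkCmdExecuteCommands(),
// vkCmdNextSubpass(), and vkCmdEndRenderPass(); e.g. recording vkCmdDraw() directly into
// such a subpass is flagged above, as is vkCmdExecuteCommands() inside a
// VK_SUBPASS_CONTENTS_INLINE subpass.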

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}
3750
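// Illustrative sketch (not part of the layer): these helpers receive the queue flags of the
// family that the command buffer's pool was allocated against, as ValidateCmd below does:
//
//     auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
//     VkQueueFlags flags =
//         dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
//     skip_call |= checkGraphicsBit(dev_data, flags, "vkCmdDraw()");
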
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not
// in the recording state or if there's an issue with the Cmd ordering
static bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
    }
    return skip_call;
}

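// Illustrative sketch (not part of the layer): a minimal shape for a recording entry point,
// assuming the usual hook boilerplate, showing where ValidateCmd and UpdateCmdBufferLastCmd
// (defined just below) slot in:
//
//     bool skip = false;
//     {
//         std::lock_guard<std::mutex> lock(global_lock);
//         GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
//         if (cb_node) {
//             skip |= ValidateCmd(dev_data, cb_node, CMD_DISPATCH, "vkCmdDispatch()");
//             UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_DISPATCH);
//         }
//     }
//     if (!skip) dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);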
static void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
    if (cb_state->state == CB_RECORDING) {
        cb_state->last_cmd = cmd;
    }
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

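// Illustrative sketch (not part of the layer): GetStateStructPtrFromObject lets generic
// bookkeeping walk a heterogeneous set of bound objects through their common BASE_NODE,
// e.g. to bump in-use counts:
//
//     for (auto obj : cb_node->object_bindings) {
//         BASE_NODE *base = GetStateStructPtrFromObject(dev_data, obj);
//         if (base) base->in_use.fetch_add(1);
//     }
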
// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
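
// Illustrative sketch (not part of the layer, and the brace-init below assumes VK_OBJECT is
// laid out as {handle, type}): a typical producer call while recording a command that binds
// some buffer whose state struct is buffer_state (a placeholder name):
//
//     addCommandBufferBinding(&buffer_state->cb_bindings,
//                             {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT},
//                             cb_node);
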
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->last_cmd = CMD_NONE;
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state as set, then unset any state that the PSO declares dynamic,
        // and finally OR the resulting mask into the CB status mask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}

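// Worked example (illustrative): if a pipeline declares only VK_DYNAMIC_STATE_LINE_WIDTH as
// dynamic, then psoDynStateMask == CBSTATUS_ALL_STATE_SET & ~CBSTATUS_LINE_WIDTH_SET, so
// binding the PSO marks everything except line width as satisfied, and draw-time validation
// still requires a vkCmdSetLineWidth() to have set CBSTATUS_LINE_WIDTH_SET.
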
// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                             UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                              UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
    }
    return outside;
}

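// Illustrative sketch (not part of the layer): a render-pass-scoped command pairs with
// outsideRenderPass(), and a transfer-style command with insideRenderPass(); the error-code
// tokens here are placeholders, not real VALIDATION_ERROR_* values:
//
//     skip |= outsideRenderPass(dev_data, cb_node, "vkCmdClearAttachments()", VALIDATION_ERROR_PLACEHOLDER_1);
//     skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_PLACEHOLDER_2);
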
static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
            instance_data->surfaceExtensionEnabled = true;
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
            instance_data->displayExtensionEnabled = true;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
            instance_data->androidSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
            instance_data->mirSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
            instance_data->waylandSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
            instance_data->win32SurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
            instance_data->xcbSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
            instance_data->xlibSurfaceExtensionEnabled = true;
#endif
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

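// Illustrative model (not part of the layer): the chain_info handling above is the standard
// layer-dispatch pattern. The loader hands each layer a linked list of pLayerInfo nodes; the
// layer captures the next GetInstanceProcAddr, advances the list, and calls down, so each
// layer initializes its dispatch table against the layer below it:
//
//     app -> core_validation::CreateInstance
//            -> next layer's CreateInstance     // via fpCreateInstance
//               -> ... -> ICD vkCreateInstance
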
// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    // Erase from the instance-level map; this data was allocated in instance_layer_data_map,
    // not the device-level layer_data_map
    instance_layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that queue family has been properly requested
static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
                                                   const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
    // First check whether the app has actually requested queueFamilyProperties
    if (!physical_device_state) {
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
                skip_call |=
                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                            "requested queueCount is %u.",
                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}

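// Illustrative sketch (not part of the layer): the application-side sequence this check
// expects before vkCreateDevice():
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
//     // ...then request a queueFamilyIndex < count with queueCount <= props[index].queueCount
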
// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add index to struct member name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                                 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                                 "which is not available on this device.",
                                 i);
            errors++;
        }
    }
    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                             "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}

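// Illustrative note: the loop above treats VkPhysicalDeviceFeatures as a packed array of
// VkBool32 (an assumption about the struct layout), so "requested[i] > actual[i]" reads as
// "feature i requested but not supported". An application avoids the error like so:
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     if (supported.geometryShader) enabled.geometryShader = VK_TRUE;  // enable only what exists
//     device_create_info.pEnabledFeatures = &enabled;
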
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    my_device_data->instance_data = my_instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;
    // Save PhysicalDevice handle
    my_device_data->physical_device = gpu;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device properties and physical device mem limits into device layer_data structs
    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &my_device_data->phys_dev_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    bool skip = false;
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    dev_data->renderPassMap.clear();
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    // Report any memory leaks
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    if (!skip) {
        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
        layer_data_map.erase(key);
    }
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
    bool skip = false;
    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        geo_error_id, "DL", "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when "
                        "device does not have geometryShader feature enabled. %s",
                        caller, validation_error_map[geo_error_id]);
    }
    if (!dev_data->enabled_features.tessellationShader &&
        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        tess_error_id, "DL", "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT "
                        "and/or VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device "
                        "does not have tessellationShader feature enabled. %s",
                        caller, validation_error_map[tess_error_id]);
    }
    return skip;
}

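// Illustrative sketch (not part of the layer): this helper is intended for stageMask-taking
// entry points such as vkCmdSetEvent/vkCmdWaitEvents/vkCmdPipelineBarrier; a hypothetical
// call site (the error-code tokens are placeholders):
//
//     skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()",
//                                          VALIDATION_ERROR_GEO_PLACEHOLDER,
//                                          VALIDATION_ERROR_TESS_PLACEHOLDER);
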
// Validate that the initial layout specified in the command buffer for the IMAGE matches the
// current global IMAGE layout
static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}

// Loop through bound objects and increment their in_use counts
//  For any unknown objects, flag an error
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other objects types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = getBufferState(dev_data, buffer);
            if (!buffer_state) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = getEventNode(dev_data, event);
        if (event_state)
            event_state->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that if there are events to be waited on prior to a QueryReset,
//  all such events must in fact have been signalled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
    bool skip = false;
    auto queue_seq = queue->seq;
    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
    auto sub_it = queue->submissions.begin();
    while (queue_seq < seq) {
        for (auto &wait : sub_it->waitSemaphores) {
            auto &last_seq = other_queue_seqs[wait.queue];
            last_seq = std::max(last_seq, wait.seq);
        }
        for (auto cb : sub_it->cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (cb_node) {
                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                    for (auto event : queryEventsPair.second) {
                        if (dev_data->eventMap[event].needsSignaled) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                            "Cannot get query results on queryPool 0x%" PRIx64
                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                        }
                    }
                }
            }
        }
        sub_it++;
        queue_seq++;
    }
    for (auto qs : other_queue_seqs) {
        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
    return skip;
}

// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = getFenceNode(dev_data, fence);
    if (VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

// TODO: nuke this completely.
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = getBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
}

4668
4669// Submit a fence to a queue, delimiting previous fences and previous untracked
4670// work by it.
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07004671static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
Chris Forbesff96dcd2016-06-16 11:47:24 +12004672 pFence->state = FENCE_INFLIGHT;
Chris Forbes8320a8d2016-08-01 15:15:30 +12004673 pFence->signaler.first = pQueue->queue;
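    // The fence is considered signaled once the queue's retirement sequence covers everything
    // already enqueued plus the submitCount batches being submitted alongside it.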
4674 pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
Michael Lentinefebf20b2016-04-20 23:01:26 -05004675}
4676
Dustin Graves8f1eab92016-04-05 09:41:17 -06004677static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004678 bool skip_call = false;
4679 if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4680 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004681 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Mike Weiblendcca8592016-12-15 12:24:24 -07004682 0, __LINE__, VALIDATION_ERROR_00133, "DS",
4683 "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
4684 validation_error_map[VALIDATION_ERROR_00133]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004685 }
4686 return skip_call;
4687}
4688
Tobin Ehlisf7cf9152016-09-27 13:10:33 -06004689static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004690 bool skip = false;
Chris Forbesa13fe522016-10-13 15:34:59 +13004691 if (dev_data->instance_data->disabled.command_buffer_state)
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004692 return skip;
Tobin Ehlise5184af2016-04-14 15:44:20 -06004693 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4694 if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004695 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4696 __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004697                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004698                        "set, but has been submitted %" PRIu64 " times.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004699 pCB->commandBuffer, pCB->submitCount);
Tobin Ehlise5184af2016-04-14 15:44:20 -06004700 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004701 // Validate that cmd buffers have been updated
4702 if (CB_RECORDED != pCB->state) {
4703 if (CB_INVALID == pCB->state) {
4704 // Inform app of reason CB invalid
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06004705 for (auto obj : pCB->broken_bindings) {
4706 const char *type_str = object_type_to_string(obj.type);
4707                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4708 const char *cause_str =
4709 (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004710
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004711 skip |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004712 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4713 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004714 "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
4715 pCB->commandBuffer, type_str, obj.handle, cause_str);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004716 }
4717 } else { // Flag error for using CB w/o vkEndCommandBuffer() called
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004718 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4719 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004720 "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
4721 call_source);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004722 }
4723 }
Tobin Ehlisa003ce82016-09-27 17:42:58 -06004724 return skip;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004725}
4726
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -06004727// Validate that queueFamilyIndices of primary command buffers match this queue
4728// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4729static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4730 bool skip_call = false;
4731 auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07004732 auto queue_state = getQueueState(dev_data, queue);
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -06004733
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07004734 if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -06004735 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Mike Weiblendcca8592016-12-15 12:24:24 -07004736 reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
4737 "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
4738 "0x%p from queue family %d. %s",
4739 pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
4740 validation_error_map[VALIDATION_ERROR_00139]);
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -06004741 }
4742
4743 return skip_call;
4744}
4745
Chris Forbesc7d3c782016-06-22 11:57:17 +12004746static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004747 // Track in-use for resources off of primary and any secondary CBs
Tobin Ehlisfe871282016-06-28 10:28:02 -06004748 bool skip_call = false;
Chris Forbes77753912016-06-23 10:36:09 +12004749
4750 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4751 // on device
Tobin Ehlisfe871282016-06-28 10:28:02 -06004752 skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
Chris Forbes77753912016-06-23 10:36:09 +12004753
Tobin Ehlisfe871282016-06-28 10:28:02 -06004754 skip_call |= validateAndIncrementResources(dev_data, pCB);
Chris Forbes77753912016-06-23 10:36:09 +12004755
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004756 if (!pCB->secondaryCommandBuffers.empty()) {
4757 for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004758 GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
Tobin Ehlisfe871282016-06-28 10:28:02 -06004759 skip_call |= validateAndIncrementResources(dev_data, pSubCB);
Tobin Ehlis1857d612016-05-09 13:22:50 -06004760 if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4761 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
Mike Weiblendcca8592016-12-15 12:24:24 -07004762                skip_call |= log_msg(
4763                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4764                    __LINE__, VALIDATION_ERROR_00135, "DS",
4765                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
4766 "primary cmd buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
4767 pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
4768 validation_error_map[VALIDATION_ERROR_00135]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004769 }
4770 }
4771 }
Chris Forbes77753912016-06-23 10:36:09 +12004772
Tobin Ehlisf7cf9152016-09-27 13:10:33 -06004773 skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
Chris Forbes77753912016-06-23 10:36:09 +12004774
Tobin Ehlisfe871282016-06-28 10:28:02 -06004775 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004776}
4777
Chris Forbesfed03c92016-06-10 10:06:42 +12004778static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004781 bool skip_call = false;
Chris Forbesfed03c92016-06-10 10:06:42 +12004782
Chris Forbesd73299b2016-06-10 15:25:45 +12004783 if (pFence) {
Chris Forbesff96dcd2016-06-16 11:47:24 +12004784 if (pFence->state == FENCE_INFLIGHT) {
Mike Weiblendcca8592016-12-15 12:24:24 -07004785 // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
Tobin Ehlisfe871282016-06-28 10:28:02 -06004786 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4787 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4788 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06004789 }
Chris Forbesfed03c92016-06-10 10:06:42 +12004790
Chris Forbesff96dcd2016-06-16 11:47:24 +12004791 else if (pFence->state == FENCE_RETIRED) {
Mike Weiblendcca8592016-12-15 12:24:24 -07004792 // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
Tobin Ehlisfe871282016-06-28 10:28:02 -06004793 skip_call |=
4794 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4795 reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4796 "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
4797 reinterpret_cast<uint64_t &>(pFence->fence));
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06004798 }
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004799 }
Chris Forbesfed03c92016-06-10 10:06:42 +12004800
Tobin Ehlisfe871282016-06-28 10:28:02 -06004801 return skip_call;
Chris Forbesfed03c92016-06-10 10:06:42 +12004802}
4803
Chris Forbesd73299b2016-06-10 15:25:45 +12004804
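// vkQueueSubmit: validate the fence and each batch (semaphore state, command buffer state, queue
// family compatibility), then record every batch on the queue's submission timeline so the work
// can later be retired in order.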
Chris Forbesfed03c92016-06-10 10:06:42 +12004805VKAPI_ATTR VkResult VKAPI_CALL
4806QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004807 bool skip_call = false;
Chris Forbesfed03c92016-06-10 10:06:42 +12004808 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4809 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4810 std::unique_lock<std::mutex> lock(global_lock);
4811
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07004812 auto pQueue = getQueueState(dev_data, queue);
Chris Forbesd73299b2016-06-10 15:25:45 +12004813 auto pFence = getFenceNode(dev_data, fence);
Tobin Ehlisfe871282016-06-28 10:28:02 -06004814 skip_call |= ValidateFenceForSubmit(dev_data, pFence);
Chris Forbesfed03c92016-06-10 10:06:42 +12004815
Tobin Ehlisfe871282016-06-28 10:28:02 -06004816 if (skip_call) {
Chris Forbes40028e22016-06-13 09:59:34 +12004817 return VK_ERROR_VALIDATION_FAILED_EXT;
4818 }
4819
Chris Forbesd73299b2016-06-10 15:25:45 +12004820 // Mark the fence in-use.
4821 if (pFence) {
Chris Forbes8320a8d2016-08-01 15:15:30 +12004822 SubmitFence(pQueue, pFence, std::max(1u, submitCount));
Chris Forbesd73299b2016-06-10 15:25:45 +12004823 }
4824
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004825 // Now verify each individual submit
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004826 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4827 const VkSubmitInfo *submit = &pSubmits[submit_idx];
Chris Forbes8320a8d2016-08-01 15:15:30 +12004828 vector<SEMAPHORE_WAIT> semaphore_waits;
4829 vector<VkSemaphore> semaphore_signals;
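        // A wait consumes the semaphore's pending signal. If that signal came from tracked queue
        // work, remember the signaler's (queue, seq) pair so retiring this submission also credits
        // progress on the signaling queue.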
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004830 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
Tobin Ehlisb093da82017-01-19 12:05:27 -07004831 skip_call |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
4832 VALIDATION_ERROR_00142, VALIDATION_ERROR_00143);
Chris Forbes220fd472016-06-21 18:59:28 +12004833 VkSemaphore semaphore = submit->pWaitSemaphores[i];
4834 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
Chris Forbes220fd472016-06-21 18:59:28 +12004835 if (pSemaphore) {
4836 if (pSemaphore->signaled) {
Chris Forbes8320a8d2016-08-01 15:15:30 +12004837 if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4838 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4839 pSemaphore->in_use.fetch_add(1);
4840 }
4841 pSemaphore->signaler.first = VK_NULL_HANDLE;
Chris Forbes220fd472016-06-21 18:59:28 +12004842 pSemaphore->signaled = false;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004843 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004844 skip_call |=
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004845 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4846 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004847 "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
4848 reinterpret_cast<const uint64_t &>(semaphore));
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004849 }
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004850 }
4851 }
4852 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
Chris Forbes220fd472016-06-21 18:59:28 +12004853 VkSemaphore semaphore = submit->pSignalSemaphores[i];
4854 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4855 if (pSemaphore) {
Chris Forbes220fd472016-06-21 18:59:28 +12004856 if (pSemaphore->signaled) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004857 skip_call |=
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004858 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4859 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004860 "Queue 0x%p is signaling semaphore 0x%" PRIx64
Mark Muelleraab36502016-05-03 13:17:29 -06004861 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07004862 queue, reinterpret_cast<const uint64_t &>(semaphore),
Chris Forbes8320a8d2016-08-01 15:15:30 +12004863 reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004864 } else {
Chris Forbes8320a8d2016-08-01 15:15:30 +12004865 pSemaphore->signaler.first = queue;
4866 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
Chris Forbes220fd472016-06-21 18:59:28 +12004867 pSemaphore->signaled = true;
Chris Forbesc7d3c782016-06-22 11:57:17 +12004868 pSemaphore->in_use.fetch_add(1);
Chris Forbes8320a8d2016-08-01 15:15:30 +12004869 semaphore_signals.push_back(semaphore);
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004870 }
4871 }
4872 }
Chris Forbesd73299b2016-06-10 15:25:45 +12004873
Chris Forbesc7d3c782016-06-22 11:57:17 +12004874 std::vector<VkCommandBuffer> cbs;
4875
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004876 for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004877 auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
4878 skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
4879 if (cb_node) {
Chris Forbesc7d3c782016-06-22 11:57:17 +12004880 cbs.push_back(submit->pCommandBuffers[i]);
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004881 for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
Chris Forbesc7d3c782016-06-22 11:57:17 +12004882 cbs.push_back(secondaryCmdBuffer);
Chris Forbesd73299b2016-06-10 15:25:45 +12004883 }
4884
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004885 cb_node->submitCount++; // increment submit count
4886 skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
4887 skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
Tobin Ehlis545fae32016-09-06 20:37:01 -06004888 // Potential early exit here as bad object state may crash in delayed function calls
4889 if (skip_call)
4890 return result;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06004891 // Call submit-time functions to validate/update state
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004892 for (auto &function : cb_node->validate_functions) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004893 skip_call |= function();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004894 }
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004895 for (auto &function : cb_node->eventUpdates) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004896 skip_call |= function(queue);
Michael Lentineb653eb22016-03-18 14:11:44 -05004897 }
Tobin Ehlisc0d36802016-10-20 10:09:44 -06004898 for (auto &function : cb_node->queryUpdates) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06004899 skip_call |= function(queue);
Michael Lentine5627e692016-05-20 17:45:02 -05004900 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004901 }
4902 }
Chris Forbesc7d3c782016-06-22 11:57:17 +12004903
Chris Forbes8320a8d2016-08-01 15:15:30 +12004904 pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4905 submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004906 }
Chris Forbes8320a8d2016-08-01 15:15:30 +12004907
4908 if (pFence && !submitCount) {
4909 // If no submissions, but just dropping a fence on the end of the queue,
4910 // record an empty submission with just the fence, so we can determine
4911 // its completion.
4912 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
4913 std::vector<SEMAPHORE_WAIT>(),
4914 std::vector<VkSemaphore>(),
4915 fence);
4916 }
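    // e.g. vkQueueSubmit(queue, 0, nullptr, fence) is a legal way to fence all prior work; the
    // empty submission recorded above gives such a fence its place in the retirement order.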
4917
Jeremy Hayesda8797f2016-04-13 16:20:24 -06004918 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06004919 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13004920 result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004921
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004922 return result;
4923}
4924
Karl Schultz8e3bccf2016-11-11 16:09:47 -07004925static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
4926 bool skip = false;
4927 if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
4928 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4929 reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
4930 "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
4931 dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
4932 validation_error_map[VALIDATION_ERROR_00611]);
4933 }
4934 return skip;
4935}
4936
4937static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
4938 add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
Karl Schultz8e3bccf2016-11-11 16:09:47 -07004939 return;
4940}
4941
Chia-I Wu629d7cd2016-05-06 11:32:54 +08004942VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4943 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
Karl Schultz8e3bccf2016-11-11 16:09:47 -07004944 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4945 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4946 std::unique_lock<std::mutex> lock(global_lock);
4947 bool skip = PreCallValidateAllocateMemory(dev_data);
4948 if (!skip) {
4949 lock.unlock();
4950 result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4951 lock.lock();
4952 if (VK_SUCCESS == result) {
4953 PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
4954 }
Mark Lobodzinski8fbf5712016-11-14 08:18:09 -07004955 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004956 return result;
4957}
4958
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004959// For the given object node: if it is in use, flag a validation error and return the callback result, else return false
4960bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
4961 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4962 if (dev_data->instance_data->disabled.object_in_use)
4963 return false;
4964 bool skip = false;
4965 if (obj_node->in_use.load()) {
4966 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
4967 error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
4968 object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
4969 }
4970 return skip;
4971}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004972
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004973static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06004974 *mem_info = getMemObjInfo(dev_data, mem);
4975 *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004976 if (dev_data->instance_data->disabled.free_memory)
4977 return false;
4978 bool skip = false;
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004979 if (*mem_info) {
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004980 skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
4981 }
4982 return skip;
4983}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07004984
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004985static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
4986 // Clear mem binding for any bound objects
Tobin Ehlis5b38d772016-10-25 22:00:47 -06004987 for (auto obj : mem_info->obj_bindings) {
4988 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
4989 "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
4990 (uint64_t)mem_info->mem);
4991 switch (obj.type) {
4992 case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4993 auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4994 assert(image_state); // Any destroyed images should already be removed from bindings
4995 image_state->binding.mem = MEMORY_UNBOUND;
4996 break;
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06004997 }
Tobin Ehlis5b38d772016-10-25 22:00:47 -06004998 case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07004999 auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
5000 assert(buffer_state); // Any destroyed buffers should already be removed from bindings
5001 buffer_state->binding.mem = MEMORY_UNBOUND;
Tobin Ehlis5b38d772016-10-25 22:00:47 -06005002 break;
5003 }
5004 default:
5005 // Should only have buffer or image objects bound to memory
5006 assert(0);
5007 }
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06005008 }
5009 // Any bound cmd buffers are now invalid
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07005010 invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06005011 dev_data->memObjMap.erase(mem);
5012}
5013
5014VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5015 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5016 DEVICE_MEM_INFO *mem_info = nullptr;
5017 VK_OBJECT obj_struct;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005018 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis4947f1d2016-10-20 11:02:03 -06005019 bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
5020 if (!skip) {
5021 lock.unlock();
5022 dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5023 lock.lock();
5024 PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
Mark Muellerf377ffb2016-07-11 15:03:44 -06005025 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005026}
5027
Tobin Ehlisb495d5f2016-08-04 09:33:02 -06005028// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
5029// and that the size of the map range should be:
5030// 1. Not zero
5031// 2. Within the size of the memory allocation
5032static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005033 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005034
5035 if (size == 0) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005036        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5037 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5038 "VkMapMemory: Attempting to map memory range of size zero");
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005039 }
5040
5041 auto mem_element = my_data->memObjMap.find(mem);
5042 if (mem_element != my_data->memObjMap.end()) {
Tobin Ehlis997b2582016-06-02 08:43:37 -06005043 auto mem_info = mem_element->second.get();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005044 // It is an application error to call VkMapMemory on an object that is already mapped
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005045 if (mem_info->mem_range.size != 0) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005046            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5047 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5048 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005049 }
5050
5051 // Validate that offset + size is within object's allocationSize
5052 if (size == VK_WHOLE_SIZE) {
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005053 if (offset >= mem_info->alloc_info.allocationSize) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005054                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5055 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5056 "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5057 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005058 offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005059 }
5060 } else {
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005061 if ((offset + size) > mem_info->alloc_info.allocationSize) {
Mike Weiblendcca8592016-12-15 12:24:24 -07005062                skip_call |= log_msg(
5063 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5064 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
5065 "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s", offset,
5066 size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005067 }
5068 }
5069 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06005070 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005071}
5072
Dustin Graves8f1eab92016-04-05 09:41:17 -06005073static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
Tobin Ehlis997b2582016-06-02 08:43:37 -06005074 auto mem_info = getMemObjInfo(my_data, mem);
5075 if (mem_info) {
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005076 mem_info->mem_range.offset = offset;
5077 mem_info->mem_range.size = size;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005078 }
5079}
5080
Dustin Graves8f1eab92016-04-05 09:41:17 -06005081static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005082 bool skip_call = false;
Tobin Ehlis997b2582016-06-02 08:43:37 -06005083 auto mem_info = getMemObjInfo(my_data, mem);
5084 if (mem_info) {
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005085 if (!mem_info->mem_range.size) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005086 // Valid Usage: memory must currently be mapped
Tobin Ehlisfe871282016-06-28 10:28:02 -06005087 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
Mike Weiblendcca8592016-12-15 12:24:24 -07005088 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
5089 "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
5090 validation_error_map[VALIDATION_ERROR_00649]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005091 }
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005092 mem_info->mem_range.size = 0;
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005093 if (mem_info->shadow_copy) {
5094 free(mem_info->shadow_copy_base);
5095 mem_info->shadow_copy_base = 0;
5096 mem_info->shadow_copy = 0;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005097 }
5098 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06005099 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005100}
5101
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005102// Guard value for pad data
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005103static char NoncoherentMemoryFillValue = 0xb;
5104
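// Record the driver's mapped pointer for this allocation. For non-coherent memory, the app instead
// receives a pointer into a shadow copy whose margins are filled with NoncoherentMemoryFillValue,
// letting later flush/unmap validation detect writes that stray outside the mapped range.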
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005105static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5106 void **ppData) {
Tobin Ehlis997b2582016-06-02 08:43:37 -06005107 auto mem_info = getMemObjInfo(dev_data, mem);
5108 if (mem_info) {
Tobin Ehlis06d54a12016-08-04 08:03:32 -06005109 mem_info->p_driver_data = *ppData;
5110 uint32_t index = mem_info->alloc_info.memoryTypeIndex;
Tobin Ehlise54be7b2016-04-11 14:49:55 -06005111 if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005112 mem_info->shadow_copy = 0;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005113 } else {
5114 if (size == VK_WHOLE_SIZE) {
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005115 size = mem_info->alloc_info.allocationSize - offset;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005116 }
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005117 mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5118 assert(vk_safe_modulo(mem_info->shadow_pad_size,
5119 dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5120 // Ensure start of mapped region reflects hardware alignment constraints
5121 uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5122
5123 // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5124 uint64_t start_offset = offset % map_alignment;
5125 // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
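            // Resulting layout, starting at shadow_copy:
            //   [pad: shadow_pad_size][user data: size][pad: shadow_pad_size]
            // shadow_copy is positioned so that (*ppData - offset) honors minMemoryMapAlignment.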
Mark Lobodzinski4dc768c2016-10-03 16:01:12 -06005126 mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005127
5128 mem_info->shadow_copy =
5129 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5130 ~(map_alignment - 1)) + start_offset;
5131 assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5132 map_alignment) == 0);
5133
Mark Lobodzinski4dc768c2016-10-03 16:01:12 -06005134 memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005135 *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005136 }
5137 }
5138}
Mark Lobodzinski066b8422016-08-15 14:27:26 -06005139
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06005140// Verify that the state of a fence being waited on is appropriate. That is,
Chris Forbes8320a8d2016-08-01 15:15:30 +12005141// the fence should have been submitted on a queue or during acquire-next-image;
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06005142// a wait on a fence that was never submitted can never complete, so it is flagged
Chris Forbes5141e922016-06-15 13:10:28 +12005143static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005144 bool skip_call = false;
Chris Forbes53a17362016-06-15 13:03:31 +12005145
Chris Forbes5141e922016-06-15 13:10:28 +12005146 auto pFence = getFenceNode(dev_data, fence);
Chris Forbes53a17362016-06-15 13:03:31 +12005147 if (pFence) {
Chris Forbesff96dcd2016-06-16 11:47:24 +12005148 if (pFence->state == FENCE_UNSIGNALED) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06005149 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5150 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5151 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5152 "acquire next image.",
5153 apiCall, reinterpret_cast<uint64_t &>(fence));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005154 }
5155 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06005156 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005157}
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06005158
Tobin Ehlis829abd02016-12-06 12:53:43 -07005159static void RetireFence(layer_data *dev_data, VkFence fence) {
Chris Forbes61e8ff52016-09-21 11:18:30 +12005160 auto pFence = getFenceNode(dev_data, fence);
5161 if (pFence->signaler.first != VK_NULL_HANDLE) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07005162 // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07005163 RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
Chris Forbes61e8ff52016-09-21 11:18:30 +12005164 }
Chris Forbes22384062016-09-21 13:36:19 +12005165 else {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07005166 // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
5167 // the fence as retired.
Chris Forbes22384062016-09-21 13:36:19 +12005168 pFence->state = FENCE_RETIRED;
Chris Forbes22384062016-09-21 13:36:19 +12005169 }
Chris Forbes61e8ff52016-09-21 11:18:30 +12005170}
5171
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005172static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
Tobin Ehlis5161de32016-12-06 14:47:52 -07005173 if (dev_data->instance_data->disabled.wait_for_fences)
5174 return false;
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005175 bool skip = false;
5176 for (uint32_t i = 0; i < fence_count; i++) {
5177 skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
Tobin Ehlis829abd02016-12-06 12:53:43 -07005178 skip |= VerifyQueueStateToFence(dev_data, fences[i]);
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005179 }
5180 return skip;
5181}
5182
Tobin Ehlis829abd02016-12-06 12:53:43 -07005183static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
5184 // When we know that all fences are complete we can clean/remove their CBs
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005185 if ((VK_TRUE == wait_all) || (1 == fence_count)) {
5186 for (uint32_t i = 0; i < fence_count; i++) {
Tobin Ehlis829abd02016-12-06 12:53:43 -07005187 RetireFence(dev_data, fences[i]);
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005188 }
5189 }
5190    // NOTE : The alternate case not handled here is when only some of the fences have completed. In
5191    // that case, to determine which fences completed, the app will have to call
Tobin Ehlis829abd02016-12-06 12:53:43 -07005192    // vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005193}
5194
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005195VKAPI_ATTR VkResult VKAPI_CALL
5196WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005197 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005198 // Verify fence status of submitted fences
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005199 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005200 bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005201 lock.unlock();
Tobin Ehlisc1b5cbb2016-12-06 07:38:48 -07005202 if (skip)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005203 return VK_ERROR_VALIDATION_FAILED_EXT;
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06005204
Chris Forbesaaa9c282016-10-03 20:01:14 +13005205 VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
Mark Muelleraab36502016-05-03 13:17:29 -06005206
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005207 if (result == VK_SUCCESS) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005208 lock.lock();
Tobin Ehlis829abd02016-12-06 12:53:43 -07005209 PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005210 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005211 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005212 return result;
5213}
5214
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005215static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
Tobin Ehlis5161de32016-12-06 14:47:52 -07005216 if (dev_data->instance_data->disabled.get_fence_state)
5217 return false;
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005218 return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5219}
5220
Tobin Ehlis829abd02016-12-06 12:53:43 -07005221static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005222
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005223VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005224 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005225 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005226 bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005227 lock.unlock();
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005228 if (skip)
Tobin Ehlisfe871282016-06-28 10:28:02 -06005229 return VK_ERROR_VALIDATION_FAILED_EXT;
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06005230
Chris Forbesaaa9c282016-10-03 20:01:14 +13005231 VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005232 if (result == VK_SUCCESS) {
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005233 lock.lock();
Tobin Ehlis829abd02016-12-06 12:53:43 -07005234 PostCallRecordGetFenceStatus(dev_data, fence);
Tobin Ehlis2ec4bf82016-12-06 08:26:05 -07005235 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005236 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005237 return result;
5238}
5239
Tobin Ehlis75fcac72016-12-15 11:34:33 -07005240static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
5241 // Add queue to tracking set only if it is new
5242 auto result = dev_data->queues.emplace(queue);
5243 if (result.second == true) {
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07005244 QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
Tobin Ehlis75fcac72016-12-15 11:34:33 -07005245 queue_state->queue = queue;
5246 queue_state->queueFamilyIndex = q_family_index;
5247 queue_state->seq = 0;
5248 }
5249}
5250
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005251VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
Mark Lobodzinski600e93d2016-03-29 09:49:15 -06005252 VkQueue *pQueue) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005253 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13005254 dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005255 std::lock_guard<std::mutex> lock(global_lock);
Mark Lobodzinski600e93d2016-03-29 09:49:15 -06005256
Tobin Ehlis75fcac72016-12-15 11:34:33 -07005257 PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005258}
5259
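// vkQueueWaitIdle implies every submission on the queue completes; verify state through the full
// submission list up front, then retire all of it once the driver call succeeds.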
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07005260static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
5261 *queue_state = getQueueState(dev_data, queue);
Tobin Ehlis5161de32016-12-06 14:47:52 -07005262 if (dev_data->instance_data->disabled.queue_wait_idle)
5263 return false;
5264 return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
Tobin Ehlis4d8e6a42016-12-06 13:20:09 -07005265}
5266
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07005267static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
Tobin Ehlis5161de32016-12-06 14:47:52 -07005268 RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
Tobin Ehlis4d8e6a42016-12-06 13:20:09 -07005269}
5270
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005271VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005272 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -07005273 QUEUE_STATE *queue_state = nullptr;
Chris Forbes8320a8d2016-08-01 15:15:30 +12005274 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis4d8e6a42016-12-06 13:20:09 -07005275 bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
Chris Forbes8320a8d2016-08-01 15:15:30 +12005276 lock.unlock();
Tobin Ehliscd8e8c82016-12-07 11:20:02 -07005277 if (skip)
5278 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13005279 VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
Tobin Ehlis4d8e6a42016-12-06 13:20:09 -07005280 if (VK_SUCCESS == result) {
Tobin Ehlis5161de32016-12-06 14:47:52 -07005281 lock.lock();
5282 PostCallRecordQueueWaitIdle(dev_data, queue_state);
5283 lock.unlock();
Tobin Ehlis4d8e6a42016-12-06 13:20:09 -07005284 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005285 return result;
5286}
5287
Tobin Ehlis08aacd92016-12-06 13:08:18 -07005288static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
Tobin Ehlis5161de32016-12-06 14:47:52 -07005289 if (dev_data->instance_data->disabled.device_wait_idle)
5290 return false;
Tobin Ehlis08aacd92016-12-06 13:08:18 -07005291 bool skip = false;
5292 for (auto &queue : dev_data->queueMap) {
5293 skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5294 }
5295 return skip;
5296}
5297
5298static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
5299 for (auto &queue : dev_data->queueMap) {
Tobin Ehlis829abd02016-12-06 12:53:43 -07005300 RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005301 }
Tobin Ehlis08aacd92016-12-06 13:08:18 -07005302}
5303
5304VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5305 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5306 std::unique_lock<std::mutex> lock(global_lock);
5307 bool skip = PreCallValidateDeviceWaitIdle(dev_data);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005308 lock.unlock();
Tobin Ehlis08aacd92016-12-06 13:08:18 -07005309 if (skip)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005310 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13005311 VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
Tobin Ehlis08aacd92016-12-06 13:08:18 -07005312 if (VK_SUCCESS == result) {
5313 lock.lock();
5314 PostCallRecordDeviceWaitIdle(dev_data);
5315 lock.unlock();
5316 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005317 return result;
5318}
5319
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005320static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
5321 *fence_node = getFenceNode(dev_data, fence);
5322 *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
Tobin Ehlis653c5222016-12-15 14:35:21 -07005323 if (dev_data->instance_data->disabled.destroy_fence)
5324 return false;
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005325 bool skip = false;
5326 if (*fence_node) {
5327 if ((*fence_node)->state == FENCE_INFLIGHT) {
5328 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
Tobin Ehlisb093da82017-01-19 12:05:27 -07005329 (uint64_t)(fence), __LINE__, VALIDATION_ERROR_00173, "DS", "Fence 0x%" PRIx64 " is in use. %s",
5330 (uint64_t)(fence), validation_error_map[VALIDATION_ERROR_00173]);
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005331 }
5332 }
5333 return skip;
5334}
5335
5336static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
5337
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005338VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005339 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005340 // Common data objects used pre & post call
5341 FENCE_NODE *fence_node = nullptr;
5342 VK_OBJECT obj_struct;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005343 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005344 bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
Tobin Ehlis9984f1e2016-04-12 10:49:41 -06005345
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005346 if (!skip) {
5347 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13005348 dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
Tobin Ehlisaf62c4a2016-12-15 14:18:39 -07005349 lock.lock();
5350 PostCallRecordDestroyFence(dev_data, fence);
5351 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005352}
5353
Tobin Ehlis6ed8e142016-12-15 14:32:22 -07005354static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
5355 VK_OBJECT *obj_struct) {
5356 *sema_node = getSemaphoreNode(dev_data, semaphore);
5357 *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
Tobin Ehlisfdedd1e2016-12-15 14:36:56 -07005358 if (dev_data->instance_data->disabled.destroy_semaphore)
5359 return false;
Tobin Ehlis6ed8e142016-12-15 14:32:22 -07005360 bool skip = false;
5361 if (*sema_node) {
5362 skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
5363 }
5364 return skip;
5365}
5366
5367static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
5368
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005369VKAPI_ATTR void VKAPI_CALL
5370DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005371 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis6ed8e142016-12-15 14:32:22 -07005372 SEMAPHORE_NODE *sema_node;
5373 VK_OBJECT obj_struct;
Tobin Ehlis25e27ab2016-07-13 09:41:09 -06005374 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis6ed8e142016-12-15 14:32:22 -07005375 bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
Tobin Ehlis7d1dd142016-08-18 08:23:30 -06005376 if (!skip) {
Tobin Ehlis7d1dd142016-08-18 08:23:30 -06005377 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13005378 dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
Tobin Ehlis6ed8e142016-12-15 14:32:22 -07005379 lock.lock();
5380 PostCallRecordDestroySemaphore(dev_data, semaphore);
Mark Mueller0782cea2016-08-24 10:42:17 -06005381 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005382}
5383
Tobin Ehlis1af17132016-10-20 14:17:21 -06005384static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06005385 *event_state = getEventNode(dev_data, event);
5386 *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
Tobin Ehlis66aba772016-10-20 14:13:24 -06005387 if (dev_data->instance_data->disabled.destroy_event)
5388 return false;
5389 bool skip = false;
Tobin Ehlis66aba772016-10-20 14:13:24 -06005390 if (*event_state) {
Tobin Ehlis66aba772016-10-20 14:13:24 -06005391 skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5392 }
5393 return skip;
5394}
5395
Tobin Ehlis1af17132016-10-20 14:17:21 -06005396static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07005397 invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
Tobin Ehlis66aba772016-10-20 14:13:24 -06005398 dev_data->eventMap.erase(event);
5399}
5400
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005401VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005402 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis1af17132016-10-20 14:17:21 -06005403 EVENT_STATE *event_state = nullptr;
Tobin Ehlis66aba772016-10-20 14:13:24 -06005404 VK_OBJECT obj_struct;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06005405 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis66aba772016-10-20 14:13:24 -06005406 bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005407 if (!skip) {
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005408 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13005409 dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
Tobin Ehlis66aba772016-10-20 14:13:24 -06005410 lock.lock();
5411 PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005412 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005413}
5414
Tobin Ehlisd8f55352016-12-16 14:56:41 -07005415static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
5416 VK_OBJECT *obj_struct) {
5417 *qp_state = getQueryPoolNode(dev_data, query_pool);
5418 *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
Tobin Ehlis45315152016-12-16 15:02:44 -07005419 if (dev_data->instance_data->disabled.destroy_query_pool)
5420 return false;
Tobin Ehlisd8f55352016-12-16 14:56:41 -07005421 bool skip = false;
5422 if (*qp_state) {
5423 skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
5424 }
5425 return skip;
5426}
5427
5428static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                            VK_OBJECT obj_struct) {
5429 invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
5430 dev_data->queryPoolMap.erase(query_pool);
5431}
5432
Chia-I Wu629d7cd2016-05-06 11:32:54 +08005433VKAPI_ATTR void VKAPI_CALL
5434DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlis2e8f5322016-07-08 14:22:01 -06005435 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisd8f55352016-12-16 14:56:41 -07005436 QUERY_POOL_NODE *qp_state = nullptr;
5437 VK_OBJECT obj_struct;
Tobin Ehlis2e8f5322016-07-08 14:22:01 -06005438 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisd8f55352016-12-16 14:56:41 -07005439 bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005440 if (!skip) {
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005441 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13005442 dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
Tobin Ehlisd8f55352016-12-16 14:56:41 -07005443 lock.lock();
5444 PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
Tobin Ehlis1afd0f52016-08-26 17:09:24 -06005445 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07005446}
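
// Gather the queries referenced by in-flight command buffers, then flag attempts to read results
// for queries that are still in flight, unavailable, or never written.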
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    // Build the queries-in-flight map unconditionally; PostCallRecordGetQueryPoolResults() consumes it
    // even when validation of this entry point is disabled
    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
        auto cb = getCBNode(dev_data, cmd_buffer);
        for (auto query_state_pair : cb->queryToStateMap) {
            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        (uint64_t)(query_pool), first_query + i);
                    }
                }
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // Unavailable and in flight
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    (uint64_t)(query_pool), first_query + i);
                }
            } else if (!query_state_pair->second) {
                // Unavailable and not in flight
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                (uint64_t)(query_pool), first_query + i);
            }
        } else {
            // Uninitialized: no data has ever been collected for this query
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            (uint64_t)(query_pool), first_query + i);
        }
    }
    return skip;
}

static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
                                 validation_error_map[VALIDATION_ERROR_00676]);
        }
    }
    return skip_call;
}

// Return true if given ranges intersect, else false
// Prereq: For both ranges, range->end - range->start > 0. Zero-sized ranges should already have
// triggered an error elsewhere, so that case is not re-checked here.
// When one range is linear and the other is non-linear, the comparison is padded out to
// bufferImageGranularity. If padding is applied and an alias is detected, a validation warning is
// reported and *skip_call may be set by the report callback, so callers should merge in the
// skip_call value whenever the padding case is possible.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                              " which may indicate a bug. For further info refer to the "
                              "Buffer-Image Granularity section of the Vulkan specification. "
                              "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                              "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
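
// Illustrative sketch only (not called by the layer): the masking above rounds each address down to
// the start of its bufferImageGranularity granule before the interval test, so a linear and a
// non-linear resource that merely share a granule are treated as intersecting. The helper below
// restates that math in isolation; the 0x400 granularity mentioned here is a made-up example value,
// and the granularity is assumed to be a power of two, as bufferImageGranularity is required to be.
// E.g. with granularity 0x400, end1 = 0x3FF and start2 = 0x400 mask to 0x000 and 0x400 (no overlap),
// while end1 = 0x4FF and start2 = 0x400 both mask to 0x400 (overlap).
static inline bool GranuleRangesOverlap(VkDeviceSize start1, VkDeviceSize end1, VkDeviceSize start2, VkDeviceSize end2,
                                        VkDeviceSize granularity) {
    const VkDeviceSize mask = ~(granularity - 1);    // clears low bits: rounds down to a granule boundary
    if ((end1 & mask) < (start2 & mask)) return false;   // range1's last granule lies wholly below range2
    if ((start1 & mask) > (end2 & mask)) return false;   // range1's first granule lies wholly above range2
    return true;                                         // granules overlap, so the ranges may alias
}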
// Simplified rangesIntersect that calls the above function to check range1 for intersection with an
// [offset, end] address span
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation-error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect the [offset, end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory w/ given mem_info struct.
// Track the newly bound memory range with given memoryOffset
// Also scan any previous ranges, track aliased ranges with the new range, and flag an error if a linear
// and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. This can't be done inside
    // the loop because the final pointer doesn't exist yet; and if the range were inserted into the map
    // before the loop to obtain that pointer, the loop could run when not needed and would check the
    // range against itself
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
// is_image indicates if handle is for image or buffer
// This function will also remove the handle-to-index mapping from the appropriate
// map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer)
        return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
    }
}

static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
    *image_state = getImageState(dev_data, image);
    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
    if (dev_data->instance_data->disabled.destroy_image)
        return false;
    bool skip = false;
    if (*image_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
    }
    return skip;
}

static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    // Remove image from imageMap
    dev_data->imageMap.erase(image);

    const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
    if (sub_entry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : sub_entry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(sub_entry);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
    }
    return skip_call;
}
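
// Worked example of the memoryTypeBits test above (values are illustrative): if
// vkGetBufferMemoryRequirements() reports memoryTypeBits == 0x6 (types 1 and 2 allowed) and the
// bound VkDeviceMemory was allocated with memoryTypeIndex == 0, then ((1 << 0) & 0x6) == 0 and the
// error fires; an allocation from type 1 passes, since ((1 << 1) & 0x6) == 0x2 != 0.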

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // The spec does not explicitly require vkGetBufferMemoryRequirements() to be called before
            // vkBindBufferMemory(), but it is implied: the memory being bound must conform to the
            // VkMemoryRequirements that vkGetBufferMemoryRequirements() would return
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                             VALIDATION_ERROR_00797);
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = {VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
                                                                VALIDATION_ERROR_00796};

        // Not static: these limits must be re-read from the current device's properties on each call
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, msgCode[i], "DS",
                                         "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                         "device limit %s 0x%" PRIxLEAST64 ". %s",
                                         memory_type[i], memoryOffset, offset_name[i], offset_requirement[i],
                                         validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
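
// Worked example of the alignment rules above (all values illustrative): with
// VkMemoryRequirements::alignment == 0x100, a memoryOffset of 0x1200 passes
// (vk_safe_modulo(0x1200, 0x100) == 0) while 0x1210 trips VALIDATION_ERROR_02174. A buffer created
// with VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT must additionally have memoryOffset be a multiple of
// minUniformBufferOffsetAlignment, e.g. an offset of 0x20 fails on a device reporting a 0x40 minimum.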

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view)
        return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline)
        return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler)
        return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state)
        invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
        return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
    }
}
// Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the skip_call result
// If this is a secondary command buffer, then make sure its primary is also in-flight
// If the primary is not in-flight, then remove the secondary from the global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // A primary CB, or a secondary CB whose primary is also in-flight, is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    // First pass: verify that none of the command buffers are in flight
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}
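
// Example trigger for the pipelineStatisticsQuery check above (illustrative): creating a pool of type
// VK_QUERY_TYPE_PIPELINE_STATISTICS on a device whose VkPhysicalDeviceFeatures were passed to
// vkCreateDevice with pipelineStatisticsQuery == VK_FALSE reports VALIDATION_ERROR_01006; if the
// report callback asks to skip the call, the pool is never created and
// VK_ERROR_VALIDATION_FAILED_EXT is returned. Enabling the feature at device-creation time (or using
// VK_QUERY_TYPE_OCCLUSION / VK_QUERY_TYPE_TIMESTAMP) avoids the error.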
6264
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006265static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06006266 *cp_state = getCommandPoolNode(dev_data, pool);
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006267 if (dev_data->instance_data->disabled.destroy_command_pool)
6268 return false;
6269 bool skip = false;
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006270 if (*cp_state) {
6271 // Verify that command buffers in pool are complete (not in-flight)
6272 skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
6273 }
6274 return skip;
6275}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006276
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006277static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06006278 // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006279 clearCommandBuffersInFlight(dev_data, cp_state);
6280 for (auto cb : cp_state->commandBuffers) {
Chris Forbes07811fa2016-06-21 13:18:44 +12006281 clear_cmd_buf_and_mem_references(dev_data, cb);
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06006282 auto cb_node = getCBNode(dev_data, cb);
Tobin Ehlis1589d542016-07-22 16:55:19 -06006283 // Remove references to this cb_node prior to delete
6284 // TODO : Need better solution here, resetCB?
Mark Lobodzinski3c9a3842016-07-15 13:53:44 -06006285 for (auto obj : cb_node->object_bindings) {
6286 removeCommandBufferBinding(dev_data, &obj, cb_node);
6287 }
Tobin Ehlis1589d542016-07-22 16:55:19 -06006288 for (auto framebuffer : cb_node->framebuffers) {
Tobin Ehlis04c04272016-10-12 11:54:09 -06006289 auto fb_state = getFramebufferState(dev_data, framebuffer);
6290 if (fb_state)
6291 fb_state->cb_bindings.erase(cb_node);
Tobin Ehlis1589d542016-07-22 16:55:19 -06006292 }
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06006293 dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6294 delete cb_node; // delete CB info structure
Chris Forbes07811fa2016-06-21 13:18:44 +12006295 }
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006296 dev_data->commandPoolMap.erase(pool);
6297}
Tobin Ehlis25e27ab2016-07-13 09:41:09 -06006298
Tobin Ehlisa4b4ce22016-10-20 09:40:35 -06006299// Destroy commandPool along with all of the commandBuffers allocated from that pool
6300VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6301 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6302 COMMAND_POOL_NODE *cp_state = nullptr;
6303 std::unique_lock<std::mutex> lock(global_lock);
6304 bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
6305 if (!skip) {
6306 lock.unlock();
6307 dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
6308 lock.lock();
6309 PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
6310 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006311}
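// Illustrative application-side sketch (not part of the layer): the in-flight
// check above (VALIDATION_ERROR_00077) is satisfied by draining the queue (or
// waiting on the submission's fence) before destroying the pool; 'queue',
// 'device', and 'pool' are assumed handles. Destroying the pool frees every
// command buffer allocated from it, so no individual vkFreeCommandBuffers
// calls are required.
//
//     vkQueueWaitIdle(queue);
//     vkDestroyCommandPool(device, pool, nullptr);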
6312
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006313VKAPI_ATTR VkResult VKAPI_CALL
6314ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006315 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -06006316 bool skip_call = false;
Tobin Ehlis4c522322016-04-11 16:39:29 -06006317
Chris Forbes6be1b5a2016-06-21 18:17:41 +12006318 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbes07811fa2016-06-21 13:18:44 +12006319 auto pPool = getCommandPoolNode(dev_data, commandPool);
Tobin Ehlis9a9f7a22016-10-20 07:43:15 -06006320 skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
Chris Forbes6be1b5a2016-06-21 18:17:41 +12006321 lock.unlock();
Chris Forbes07811fa2016-06-21 13:18:44 +12006322
Tobin Ehlisfe871282016-06-28 10:28:02 -06006323 if (skip_call)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006324 return VK_ERROR_VALIDATION_FAILED_EXT;
6325
Chris Forbesaaa9c282016-10-03 20:01:14 +13006326 VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006327
6328 // Reset all of the CBs allocated from this pool
6329 if (VK_SUCCESS == result) {
Chris Forbes6be1b5a2016-06-21 18:17:41 +12006330 lock.lock();
Chris Forbes07811fa2016-06-21 13:18:44 +12006331 clearCommandBuffersInFlight(dev_data, pPool);
6332 for (auto cmdBuffer : pPool->commandBuffers) {
6333 resetCB(dev_data, cmdBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006334 }
Chris Forbes6be1b5a2016-06-21 18:17:41 +12006335 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006336 }
6337 return result;
6338}
6339
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006340VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006341 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -06006342 bool skip_call = false;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006343 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006344 for (uint32_t i = 0; i < fenceCount; ++i) {
Chris Forbes0111ee62016-06-15 15:48:52 +12006345 auto pFence = getFenceNode(dev_data, pFences[i]);
Chris Forbes9c457b92016-06-21 18:10:47 +12006346 if (pFence && pFence->state == FENCE_INFLIGHT) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06006347 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07006348 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
6349 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
6350 validation_error_map[VALIDATION_ERROR_00183]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006351 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006352 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006353 lock.unlock();
Chris Forbes9c457b92016-06-21 18:10:47 +12006354
Tobin Ehlisfe871282016-06-28 10:28:02 -06006355 if (skip_call)
Chris Forbes9c457b92016-06-21 18:10:47 +12006356 return VK_ERROR_VALIDATION_FAILED_EXT;
6357
Chris Forbesaaa9c282016-10-03 20:01:14 +13006358 VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
Chris Forbes9c457b92016-06-21 18:10:47 +12006359
6360 if (result == VK_SUCCESS) {
6361 lock.lock();
6362 for (uint32_t i = 0; i < fenceCount; ++i) {
6363 auto pFence = getFenceNode(dev_data, pFences[i]);
6364 if (pFence) {
6365 pFence->state = FENCE_UNSIGNALED;
Chris Forbes9c457b92016-06-21 18:10:47 +12006366 }
6367 }
6368 lock.unlock();
6369 }
6370
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006371 return result;
6372}
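// Illustrative application-side sketch (not part of the layer): a fence that
// is still FENCE_INFLIGHT triggers VALIDATION_ERROR_00183 above, so wait for
// it before resetting; 'device' and 'fence' are assumed handles.
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence);  // tracked state returns to FENCE_UNSIGNALED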
6373
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006374// For given cb_nodes, invalidate them and track object causing invalidation
Tobin Ehlisab294d82016-11-21 15:23:51 -07006375void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006376 for (auto cb_node : cb_nodes) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006377 if (cb_node->state == CB_RECORDING) {
6378 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Tobin Ehlisfda58e22016-11-21 15:15:52 -07006379 (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07006380 "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006381 }
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006382 cb_node->state = CB_INVALID;
6383 cb_node->broken_bindings.push_back(obj);
6384 }
6385}
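// Illustrative sequence this tracking covers (application-side sketch; 'cb',
// 'fb', and 'device' are assumed handles):
//
//     vkEndCommandBuffer(cb);                      // cb has recorded a pass using fb
//     vkDestroyFramebuffer(device, fb, nullptr);   // cb is now marked CB_INVALID
//     // Submitting cb at this point is an error; it must be re-recorded first.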
6386
Tobin Ehlis04c04272016-10-12 11:54:09 -06006387static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
6388 FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06006389 *framebuffer_state = getFramebufferState(dev_data, framebuffer);
6390 *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
Chris Forbesa13fe522016-10-13 15:34:59 +13006391 if (dev_data->instance_data->disabled.destroy_framebuffer)
Tobin Ehlis53332f02016-10-12 11:48:21 -06006392 return false;
6393 bool skip = false;
Tobin Ehlis53332f02016-10-12 11:48:21 -06006394 if (*framebuffer_state) {
Tobin Ehlis53332f02016-10-12 11:48:21 -06006395 skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
6396 }
6397 return skip;
6398}
6399
Tobin Ehlis04c04272016-10-12 11:54:09 -06006400static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
Tobin Ehlis53332f02016-10-12 11:48:21 -06006401 VK_OBJECT obj_struct) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006402 invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
Tobin Ehlis53332f02016-10-12 11:48:21 -06006403 dev_data->frameBufferMap.erase(framebuffer);
6404}
6405
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006406VKAPI_ATTR void VKAPI_CALL
6407DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006408 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis04c04272016-10-12 11:54:09 -06006409 FRAMEBUFFER_STATE *framebuffer_state = nullptr;
Tobin Ehlis53332f02016-10-12 11:48:21 -06006410 VK_OBJECT obj_struct;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006411 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis53332f02016-10-12 11:48:21 -06006412 bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
6413 if (!skip) {
6414 lock.unlock();
6415 dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6416 lock.lock();
6417 PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006418 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006419}
6420
Tobin Ehlis062595b2016-10-12 16:58:54 -06006421static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
6422 VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06006423 *rp_state = getRenderPassState(dev_data, render_pass);
6424 *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
Chris Forbesa13fe522016-10-13 15:34:59 +13006425 if (dev_data->instance_data->disabled.destroy_renderpass)
Tobin Ehlis062595b2016-10-12 16:58:54 -06006426 return false;
6427 bool skip = false;
Tobin Ehlis062595b2016-10-12 16:58:54 -06006428 if (*rp_state) {
Tobin Ehlis062595b2016-10-12 16:58:54 -06006429 skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
6430 }
6431 return skip;
6432}
6433
6434static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
6435 VK_OBJECT obj_struct) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006436 invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006437 dev_data->renderPassMap.erase(render_pass);
6438}
6439
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006440VKAPI_ATTR void VKAPI_CALL
6441DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006442 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006443 RENDER_PASS_STATE *rp_state = nullptr;
6444 VK_OBJECT obj_struct;
Tobin Ehlis25e27ab2016-07-13 09:41:09 -06006445 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006446 bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
Tobin Ehlisa5495242016-09-19 14:20:37 -06006447 if (!skip) {
Tobin Ehlisa5495242016-09-19 14:20:37 -06006448 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13006449 dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006450 lock.lock();
6451 PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
Tobin Ehlisa5495242016-09-19 14:20:37 -06006452 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006453}
6454
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006455VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6456 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006457 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Karl Schultza9ef1e52016-10-06 17:53:48 -06006458 // TODO: Add check for VALIDATION_ERROR_00658
6459 // TODO: Add check for VALIDATION_ERROR_00666
6460 // TODO: Add check for VALIDATION_ERROR_00667
6461 // TODO: Add check for VALIDATION_ERROR_00668
6462 // TODO: Add check for VALIDATION_ERROR_00669
Chris Forbesaaa9c282016-10-03 20:01:14 +13006463 VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006464
6465 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006466 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006467        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006468 dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006469 }
6470 return result;
6471}
6472
Mark Youngd339ba32016-05-30 13:28:35 -06006473static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6474 bool skip_call = false;
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006475 BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
Mark Youngd339ba32016-05-30 13:28:35 -06006476 // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006477 if (buffer_state) {
Tobin Ehlise1995fc2016-12-22 12:45:09 -07006478 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
Tobin Ehlis18bca092016-06-29 09:07:52 -06006479 // In order to create a valid buffer view, the buffer must have been created with at least one of the
6480 // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006481 skip_call |= ValidateBufferUsageFlags(
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006482 dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006483 VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06006484 }
Mark Youngd339ba32016-05-30 13:28:35 -06006485 return skip_call;
6486}
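// Illustrative application-side sketch (not part of the layer) of a buffer
// that satisfies both checks above: a texel-buffer usage bit at creation, and
// memory bound before the view is created. 'device' and 'memory' are assumed
// handles; memory allocation is elided.
//
//     VkBufferCreateInfo bci = {};
//     bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bci.size = 4096;
//     bci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
//     VkBuffer buffer;
//     vkCreateBuffer(device, &bci, nullptr, &buffer);
//     vkBindBufferMemory(device, buffer, memory, 0);
//
//     VkBufferViewCreateInfo bvci = {};
//     bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
//     bvci.buffer = buffer;
//     bvci.format = VK_FORMAT_R32G32B32A32_SFLOAT;
//     bvci.offset = 0;
//     bvci.range = VK_WHOLE_SIZE;
//     VkBufferView view;
//     vkCreateBufferView(device, &bvci, nullptr, &view);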
6487
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006488VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6489 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006490 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Youngd339ba32016-05-30 13:28:35 -06006491 std::unique_lock<std::mutex> lock(global_lock);
6492 bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6493 lock.unlock();
6494 if (skip_call)
6495 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13006496 VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006497 if (VK_SUCCESS == result) {
Mark Youngd339ba32016-05-30 13:28:35 -06006498 lock.lock();
Tobin Ehlis8b872462016-09-14 08:12:08 -06006499 dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
Mark Youngd339ba32016-05-30 13:28:35 -06006500 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006501 }
6502 return result;
6503}
6504
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006505VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6506 const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006507 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Lobodzinskif20f0942016-03-22 10:07:26 -06006508
Chris Forbesaaa9c282016-10-03 20:01:14 +13006509 VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006510
6511 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006512 std::lock_guard<std::mutex> lock(global_lock);
Mark Lobodzinskibf93db72017-01-17 16:24:29 -07006513 PostCallRecordCreateImage(&dev_data->imageMap, &dev_data->imageSubresourceMap, &dev_data->imageLayoutMap, pCreateInfo,
6514 pImage);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006515 }
6516 return result;
6517}
6518
6519static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006520 // Expects global_lock to be held by caller
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006521
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006522 auto image_state = getImageState(dev_data, image);
6523 if (image_state) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006524 // If the caller used the special values VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS, resolve them now in our
6525 // internal state to the actual values.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006526 if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006527 range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006528 }
6529
6530 if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006531 range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006532 }
6533 }
6534}
6535
6536// Return the correct layer/level counts if the caller used the special
6537// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6538static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6539 VkImage image) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006540 // Expects global_lock to be held by caller
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006541
6542 *levels = range.levelCount;
6543 *layers = range.layerCount;
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006544 auto image_state = getImageState(dev_data, image);
6545 if (image_state) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006546 if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006547 *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006548 }
6549 if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006550 *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006551 }
6552 }
6553}
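// Worked example: for an image created with mipLevels = 10 and arrayLayers = 6,
// a range of {baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 2, layerCount = VK_REMAINING_ARRAY_LAYERS} resolves to
// levelCount = 10 - 3 = 7 and layerCount = 6 - 2 = 4.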
6554
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006555// For the given format, verify that the requested aspect mask makes sense
6556static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
6557 const char *func_name) {
6558 bool skip = false;
6559 if (vk_format_is_color(format)) {
6560 if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
6561 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6562 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6563 "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6564 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006565 } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006566 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6567 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6568 "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6569 validation_error_map[VALIDATION_ERROR_00741]);
6570 }
6571 } else if (vk_format_is_depth_and_stencil(format)) {
6572 if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
6573 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6574 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s: Depth/stencil image formats must have "
6575 "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
6576 "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6577 func_name, validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006578 } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006579 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6580 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6581 "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
6582 "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6583 func_name, validation_error_map[VALIDATION_ERROR_00741]);
6584 }
6585 } else if (vk_format_is_depth_only(format)) {
6586 if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
6587 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6588 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6589 "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6590 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006591 } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006592 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6593 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6594 "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6595 validation_error_map[VALIDATION_ERROR_00741]);
6596 }
6597 } else if (vk_format_is_stencil_only(format)) {
6598 if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
6599 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6600 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6601 "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6602 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006603 } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006604 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6605 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6606 "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6607 validation_error_map[VALIDATION_ERROR_00741]);
6608 }
6609 }
6610 return skip;
6611}
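// Illustrative outcomes of the checks above for common formats:
//     VK_FORMAT_R8G8B8A8_UNORM    -> exactly VK_IMAGE_ASPECT_COLOR_BIT
//     VK_FORMAT_D24_UNORM_S8_UINT -> DEPTH_BIT, STENCIL_BIT, or both (nothing else)
//     VK_FORMAT_D32_SFLOAT        -> exactly VK_IMAGE_ASPECT_DEPTH_BIT
//     VK_FORMAT_S8_UINT           -> exactly VK_IMAGE_ASPECT_STENCIL_BIT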
6612
Tobin Ehlis16239872016-10-26 10:42:49 -06006613static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006614 bool skip = false;
Tobin Ehlis16239872016-10-26 10:42:49 -06006615 IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006616 if (image_state) {
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006617 skip |= ValidateImageUsageFlags(
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006618 dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6619 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006620 false, -1, "vkCreateImageView()",
Tony Barbour311dcbe2016-08-26 13:01:43 -06006621 "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06006622 // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
Tobin Ehlise1995fc2016-12-22 12:45:09 -07006623 skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
Tobin Ehlis16239872016-10-26 10:42:49 -06006624 // Checks imported from image layer
6625 if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
6626 std::stringstream ss;
6627 ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
6628 << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
6629 skip |=
6630 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6631 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6632 }
6633 if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
6634 std::stringstream ss;
6635 ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
6636 << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
6637 skip |=
6638 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6639 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6640 }
6641 // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
6642 if (!create_info->subresourceRange.levelCount) {
6643 std::stringstream ss;
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006644 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
Tobin Ehlis16239872016-10-26 10:42:49 -06006645 skip |=
6646 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6647 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6648 }
6649 if (!create_info->subresourceRange.layerCount) {
6650 std::stringstream ss;
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006651 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
Tobin Ehlis16239872016-10-26 10:42:49 -06006652 skip |=
6653 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6654 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6655 }
6656
6657 VkImageCreateFlags image_flags = image_state->createInfo.flags;
6658 VkFormat image_format = image_state->createInfo.format;
6659 VkFormat view_format = create_info->format;
6660 VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
6661
6662 // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
6663 if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
6664 // Format MUST be compatible (in the same format compatibility class) as the format the image was created with
6665 if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
6666 std::stringstream ss;
6667 ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
6668 << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ") format "
6669                   << string_VkFormat(image_format) << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
6670 << "can support ImageViews with differing formats but they must be in the same compatibility class.";
6671 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6672 VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6673 validation_error_map[VALIDATION_ERROR_02171]);
6674 }
6675 } else {
6676 // Format MUST be IDENTICAL to the format the image was created with
6677 if (image_format != view_format) {
6678 std::stringstream ss;
6679 ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
6680 << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
6681                   << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT was set on image creation.";
6682 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6683 VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
6684 validation_error_map[VALIDATION_ERROR_02172]);
6685 }
6686 }
6687
6688 // Validate correct image aspect bits for desired formats and format consistency
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006689 skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
Mark Youngd339ba32016-05-30 13:28:35 -06006690 }
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006691 return skip;
Mark Youngd339ba32016-05-30 13:28:35 -06006692}
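// Illustrative application-side sketch (not part of the layer) of a view that
// passes the VALIDATION_ERROR_02171 check: 'image' (an assumed handle) was
// created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and format
// VK_FORMAT_R8G8B8A8_UNORM, and the view reinterprets it as another format in
// the same compatibility class.
//
//     VkImageViewCreateInfo ivci = {};
//     ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
//     ivci.image = image;
//     ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
//     ivci.format = VK_FORMAT_R8G8B8A8_SRGB;  // same compatibility class as R8G8B8A8_UNORM
//     ivci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     VkImageView view;
//     vkCreateImageView(device, &ivci, nullptr, &view);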
6693
Tobin Ehlis16239872016-10-26 10:42:49 -06006694static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
6695 dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
6696 ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, create_info->image);
Mark Youngd339ba32016-05-30 13:28:35 -06006697}
6698
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006699VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6700 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006701 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Youngd339ba32016-05-30 13:28:35 -06006702 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006703 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
Mark Youngd339ba32016-05-30 13:28:35 -06006704 lock.unlock();
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006705 if (skip)
Mark Youngd339ba32016-05-30 13:28:35 -06006706 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13006707 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006708 if (VK_SUCCESS == result) {
Mark Youngd339ba32016-05-30 13:28:35 -06006709 lock.lock();
Tobin Ehlis8b26a382016-09-14 08:02:49 -06006710 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
Mark Youngd339ba32016-05-30 13:28:35 -06006711 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006712 }
Chris Forbes86c586a2016-05-08 10:19:14 +12006713
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006714 return result;
6715}
6716
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006717VKAPI_ATTR VkResult VKAPI_CALL
6718CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006719 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006720 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006721 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006722 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06006723 auto &fence_node = dev_data->fenceMap[*pFence];
Chris Forbesbc0b8212016-06-10 15:22:37 +12006724 fence_node.fence = *pFence;
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06006725 fence_node.createInfo = *pCreateInfo;
Chris Forbesff96dcd2016-06-16 11:47:24 +12006726 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006727 }
6728 return result;
6729}
6730
6731// TODO handle pipeline caches
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006732VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6733 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006734 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006735 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006736 return result;
6737}
6738
6739VKAPI_ATTR void VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006740DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006741 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006742 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006743}
6744
6745VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006746GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006747 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006748 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006749 return result;
6750}
6751
6752VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006753MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006754 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006755 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006756 return result;
6757}
6758
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06006759// Utility function to set collective state for a pipeline
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006760void set_pipeline_state(PIPELINE_STATE *pPipe) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06006761 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6762 if (pPipe->graphicsPipelineCI.pColorBlendState) {
6763 for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6764 if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6765 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6766 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6767 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6768 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6769 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6770 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6771 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6772 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6773 pPipe->blendConstantsEnabled = true;
6774 }
6775 }
6776 }
6777 }
6778}
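// When blendConstantsEnabled is set and the pipeline also declares
// VK_DYNAMIC_STATE_BLEND_CONSTANTS, the application must supply the constants
// before drawing (illustrative sketch; 'cb' is an assumed command buffer):
//
//     const float blend_constants[4] = {1.0f, 1.0f, 1.0f, 1.0f};
//     vkCmdSetBlendConstants(cb, blend_constants);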
6779
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006780static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
6781 const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
6782 bool skip = false;
Mark Lobodzinskica60e142016-11-16 11:12:30 -07006783 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006784
6785 for (uint32_t i = 0; i < count; i++) {
6786 skip |= verifyPipelineCreateState(device_data, pipe_state, i);
Mark Lobodzinskica60e142016-11-16 11:12:30 -07006787 if (create_infos[i].pVertexInputState != NULL) {
6788 for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
6789 VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
6790 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
6791 VkFormatProperties properties;
6792 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
6793 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
6794 skip |= log_msg(
6795 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
6796 __LINE__, VALIDATION_ERROR_01413, "IMAGE",
6797 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
6798 "(%s) is not a supported vertex buffer format. %s",
6799 i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
6800 }
6801 }
6802 }
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006803 }
6804 return skip;
6805}
6806
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006807VKAPI_ATTR VkResult VKAPI_CALL
6808CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6809 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6810 VkPipeline *pPipelines) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006811 // TODO What to do with pipelineCache?
6812 // The order of operations here is a little convoluted but gets the job done
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006813 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006814 // 2. Create state is then validated (which uses flags setup during shadowing)
6815    // 3. If everything looks good, we'll then create the pipeline and add the new state object to pipelineMap
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006816 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006817 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006818 vector<PIPELINE_STATE *> pipe_state(count);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006819 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6820
6821 uint32_t i = 0;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006822 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006823
6824 for (i = 0; i < count; i++) {
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006825 pipe_state[i] = new PIPELINE_STATE;
6826 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
6827 pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6828 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006829 }
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006830 skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006831
Chris Forbes56754e22016-11-30 14:24:32 +13006832 if (skip) {
6833 for (i = 0; i < count; i++) {
6834 delete pipe_state[i];
Chris Forbes83e91ad2016-11-30 14:26:50 +13006835 pPipelines[i] = VK_NULL_HANDLE;
Chris Forbes56754e22016-11-30 14:24:32 +13006836 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006837 return VK_ERROR_VALIDATION_FAILED_EXT;
6838 }
6839
6840 lock.unlock();
6841 auto result = dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6842 lock.lock();
6843 for (i = 0; i < count; i++) {
Chris Forbesb186edf2016-11-30 14:41:35 +13006844 if (pPipelines[i] == VK_NULL_HANDLE) {
6845 delete pipe_state[i];
6846        } else {
6848 pipe_state[i]->pipeline = pPipelines[i];
6849 dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
6850 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006851 }
Chris Forbes56754e22016-11-30 14:24:32 +13006852
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006853 return result;
6854}
6855
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006856VKAPI_ATTR VkResult VKAPI_CALL
6857CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6858 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6859 VkPipeline *pPipelines) {
Chris Forbes183f4f92016-11-30 14:35:52 +13006860 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006861
6862 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006863 vector<PIPELINE_STATE *> pPipeState(count);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006864 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6865
6866 uint32_t i = 0;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006867 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006868 for (i = 0; i < count; i++) {
6869 // TODO: Verify compute stage bits
6870
6871 // Create and initialize internal tracking data structure
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006872 pPipeState[i] = new PIPELINE_STATE;
6873 pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
Tobin Ehlisc1d9be12016-10-13 10:18:18 -06006874 pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006875
6876 // TODO: Add Compute Pipeline Verification
Chris Forbes183f4f92016-11-30 14:35:52 +13006877 skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
Tobin Ehlisfe871282016-06-28 10:28:02 -06006878 dev_data->shaderModuleMap);
Chris Forbes183f4f92016-11-30 14:35:52 +13006879 // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006880 }
6881
Chris Forbes78a69c62016-11-30 14:39:24 +13006882 if (skip) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006883 for (i = 0; i < count; i++) {
6884 // Clean up any locally allocated data structures
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006885 delete pPipeState[i];
Chris Forbes3224f952016-11-30 14:44:03 +13006886 pPipelines[i] = VK_NULL_HANDLE;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006887 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006888 return VK_ERROR_VALIDATION_FAILED_EXT;
6889 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006890
6891 lock.unlock();
6892 auto result = dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6893 lock.lock();
6894 for (i = 0; i < count; i++) {
Chris Forbes3224f952016-11-30 14:44:03 +13006895 if (pPipelines[i] == VK_NULL_HANDLE) {
6896 delete pPipeState[i];
6897        } else {
6899 pPipeState[i]->pipeline = pPipelines[i];
6900 dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6901 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006902 }
6903
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006904 return result;
6905}
6906
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006907VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6908 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006909 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006910 VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006911 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006912 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisfad7adf2016-10-20 06:50:37 -06006913 dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006914 }
6915 return result;
6916}
6917
Tobin Ehlis154c2692016-10-25 09:36:53 -06006918static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6919 if (dev_data->instance_data->disabled.create_descriptor_set_layout)
6920 return false;
6921 return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6922}
6923
6924static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6925 VkDescriptorSetLayout set_layout) {
Tobin Ehlisfdcb63f2016-10-25 20:56:47 -06006926 // TODO: Convert this to unique_ptr to avoid leaks
Tobin Ehlis154c2692016-10-25 09:36:53 -06006927 dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6928}
6929
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006930VKAPI_ATTR VkResult VKAPI_CALL
6931CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6932 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006933 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis154c2692016-10-25 09:36:53 -06006934 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6935 std::unique_lock<std::mutex> lock(global_lock);
6936 bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6937 if (!skip) {
6938 lock.unlock();
6939 result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6940 if (VK_SUCCESS == result) {
6941 lock.lock();
6942 PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6943 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006944 }
6945 return result;
6946}
6947
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006948// Used by CreatePipelineLayout and CmdPushConstants.
6949// Note that the index argument is optional and only used by CreatePipelineLayout.
6950static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6951 const char *caller_name, uint32_t index = 0) {
Chris Forbesa13fe522016-10-13 15:34:59 +13006952 if (dev_data->instance_data->disabled.push_constant_range)
Tobin Ehlisf0e83a32016-10-06 14:16:14 -06006953 return false;
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006954 uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
Tobin Ehlisfe871282016-06-28 10:28:02 -06006955 bool skip_call = false;
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006956 // Check that offset + size don't exceed the max.
6957 // Prevent arithetic overflow here by avoiding addition and testing in this order.
6958    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6959 // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6960 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlis2765e462016-11-23 10:47:26 -07006961 if (offset >= maxPushConstantsSize) {
6962 skip_call |=
6963 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6964 VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u that "
6965                                 "exceeds this device's maxPushConstantsSize of %u. %s",
6966 caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6967 }
6968 if (size > maxPushConstantsSize - offset) {
6969 skip_call |=
6970 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6971 VALIDATION_ERROR_00880, "DS", "%s call has push constants index %u with offset %u and size %u that "
6972                                 "exceeds this device's maxPushConstantsSize of %u. %s",
6973 caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
6974 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006975 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Dave Houlton197211a2016-12-23 15:26:29 -07006976 if (offset >= maxPushConstantsSize) {
6977 skip_call |=
6978 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6979 VALIDATION_ERROR_00991, "DS", "%s call has push constants index %u with offset %u that "
6980                                 "exceeds this device's maxPushConstantsSize of %u. %s",
6981 caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
6982 }
6983 if (size > maxPushConstantsSize - offset) {
6984 skip_call |=
6985 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6986 VALIDATION_ERROR_00992, "DS", "%s call has push constants index %u with offset %u and size %u that "
6987                                 "exceeds this device's maxPushConstantsSize of %u. %s",
6988 caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
6989 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006990 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06006991 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6992 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006993 }
6994 }
6995 // size needs to be non-zero and a multiple of 4.
6996 if ((size == 0) || ((size & 0x3) != 0)) {
6997 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlisfe699542016-11-23 09:41:12 -07006998 if (size == 0) {
6999 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7000 __LINE__, VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
7001 "size %u. Size must be greater than zero. %s",
7002 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
7003 }
7004 if (size & 0x3) {
7005 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7006 __LINE__, VALIDATION_ERROR_00879, "DS", "%s call has push constants index %u with "
7007 "size %u. Size must be a multiple of 4. %s",
7008 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
7009 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007010 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Dave Houlton197211a2016-12-23 15:26:29 -07007011 if (size == 0) {
7012 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7013 __LINE__, VALIDATION_ERROR_01000, "DS", "%s call has push constants index %u with "
7014 "size %u. Size must be greater than zero. %s",
7015 caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
7016 }
7017 if (size & 0x3) {
7018 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7019 __LINE__, VALIDATION_ERROR_00990, "DS", "%s call has push constants index %u with "
7020 "size %u. Size must be a multiple of 4. %s",
7021 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
7022 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007023 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007024 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7025 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007026 }
7027 }
7028 // offset needs to be a multiple of 4.
7029 if ((offset & 0x3) != 0) {
7030 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007031 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007032 VALIDATION_ERROR_02521, "DS", "%s call has push constants index %u with "
7033 "offset %u. Offset must be a multiple of 4. %s",
7034 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007035 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007036 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007037 VALIDATION_ERROR_00989, "DS", "%s call has push constants with "
7038 "offset %u. Offset must be a multiple of 4. %s",
7039 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007040 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007041 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7042 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007043 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007044 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06007045 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007046}
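// Illustrative range that passes all three checks above (offset and size are
// multiples of 4, size is non-zero, and offset + size stays within
// maxPushConstantsSize, which the spec guarantees to be at least 128):
//
//     VkPushConstantRange range = {};
//     range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
//     range.offset = 0;   // multiple of 4
//     range.size = 64;    // non-zero multiple of 4; 0 + 64 <= 128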
7047
Dave Houlton197211a2016-12-23 15:26:29 -07007048VKAPI_ATTR VkResult VKAPI_CALL
7049CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007050 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007051 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007052 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisbf98b692016-10-06 12:58:06 -06007053 // TODO : Add checks for VALIDATION_ERRORS 865-871
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007054 // Push Constant Range checks
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007055 uint32_t i, j;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007056 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007057 skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
7058 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007059 if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007060 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007061 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
7062 validation_error_map[VALIDATION_ERROR_00882]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007063 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007064 }
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007065 if (skip_call)
7066 return VK_ERROR_VALIDATION_FAILED_EXT;
7067
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007068 // Each range has been validated. Now check for overlap between ranges (if they are good).
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007069 // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
7070 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7071 for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
7072 const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
7073 const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
7074 const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
7075 const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
7076 if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
7077 skip_call |=
7078 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7079 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
7080 "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
7081 i, minA, maxA, j, minB, maxB);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007082 }
7083 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007084 }
Chris Forbescf7615e2016-05-10 16:02:49 +12007085
Chris Forbesaaa9c282016-10-03 20:01:14 +13007086 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007087 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007088 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007089 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
Tobin Ehlis0fc85672016-07-07 11:06:26 -06007090 plNode.layout = *pPipelineLayout;
Tobin Ehlis3df41292016-07-07 09:23:38 -06007091 plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007092 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06007093 plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007094 }
Tobin Ehlis3df41292016-07-07 09:23:38 -06007095 plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007096 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06007097 plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007098 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007099 }
7100 return result;
7101}
7102
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007103VKAPI_ATTR VkResult VKAPI_CALL
7104CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
7105 VkDescriptorPool *pDescriptorPool) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007106 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13007107 VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007108 if (VK_SUCCESS == result) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007109 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
Mark Muelleraab36502016-05-03 13:17:29 -06007110 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007111 (uint64_t)*pDescriptorPool))
7112 return VK_ERROR_VALIDATION_FAILED_EXT;
Tobin Ehlisbd711bd2016-10-12 14:27:30 -06007113 DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007114 if (NULL == pNewNode) {
7115 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
7116 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
Tobin Ehlisbd711bd2016-10-12 14:27:30 -06007117 "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007118 return VK_ERROR_VALIDATION_FAILED_EXT;
7119 } else {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007120 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007121 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007122 }
7123 } else {
7124        // TODO: Determine whether any state updates are needed if pool creation fails
7125 }
7126 return result;
7127}
7128
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007129VKAPI_ATTR VkResult VKAPI_CALL
7130ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
Tobin Ehlis75f04ec2016-10-06 17:43:11 -06007131 // TODO : Add checks for VALIDATION_ERROR_00928
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007132 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13007133 VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007134 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007135 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007136 clearDescriptorPool(dev_data, device, descriptorPool, flags);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007137 }
7138 return result;
7139}
Chris Forbesb3f60062016-05-23 11:29:51 +12007140// Ensure the pool contains enough descriptors and descriptor sets to satisfy
Tobin Ehlisbdb28002016-06-01 11:56:42 -06007141// an allocation request. Fills common_data with the total number of descriptors of each type required,
7142// as well as DescriptorSetLayout ptrs used for later update.
Tobin Ehlis68d0adf2016-06-01 11:33:50 -06007143static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
7144 cvdescriptorset::AllocateDescriptorSetsData *common_data) {
Chris Forbesa13fe522016-10-13 15:34:59 +13007145 if (dev_data->instance_data->disabled.allocate_descriptor_sets)
Tobin Ehlisf0e83a32016-10-06 14:16:14 -06007146 return false;
Tobin Ehlisee471462016-05-26 11:21:59 -06007147    // All state checks for AllocateDescriptorSets are done in a single function
Tobin Ehlis815e8132016-06-02 13:02:17 -06007148 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
Tobin Ehlisee471462016-05-26 11:21:59 -06007149}
7150// Allocation state was good and the down-chain call was made, so update state based on the allocated descriptor sets
7151static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
Tobin Ehlis68d0adf2016-06-01 11:33:50 -06007152 VkDescriptorSet *pDescriptorSets,
7153 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
Tobin Ehlisee471462016-05-26 11:21:59 -06007154 // All the updates are contained in a single cvdescriptorset function
Tobin Ehlisd5fb09e2016-06-02 10:54:09 -06007155 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
Tobin Ehlis4e380592016-06-02 12:41:47 -06007156 &dev_data->setMap, dev_data);
Chris Forbesb3f60062016-05-23 11:29:51 +12007157}
7158
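// Typical entry-point flow below: take the global lock, run PreCallValidate* against
// tracked state, drop the lock around the down-chain dispatch call, then re-take it
// for the PostCallRecord* state update so the driver is never called under the lock.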
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007159VKAPI_ATTR VkResult VKAPI_CALL
7160AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007161 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007162 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis68d0adf2016-06-01 11:33:50 -06007163 cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
7164 bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007165 lock.unlock();
Chris Forbes70858bb2016-05-20 17:12:39 +12007166
Tobin Ehlisee471462016-05-26 11:21:59 -06007167 if (skip_call)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007168 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbes70858bb2016-05-20 17:12:39 +12007169
Chris Forbesaaa9c282016-10-03 20:01:14 +13007170 VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
Chris Forbesbc22feb2016-05-20 16:34:01 +12007171
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007172 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007173 lock.lock();
Tobin Ehlis68d0adf2016-06-01 11:33:50 -06007174 PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007175 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007176 }
7177 return result;
7178}
Tobin Ehlis2c763302016-05-26 13:30:45 -06007179// Verify state before freeing DescriptorSets
7180static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7181 const VkDescriptorSet *descriptor_sets) {
Chris Forbesa13fe522016-10-13 15:34:59 +13007182 if (dev_data->instance_data->disabled.free_descriptor_sets)
Tobin Ehlisf0e83a32016-10-06 14:16:14 -06007183 return false;
Tobin Ehlis2c763302016-05-26 13:30:45 -06007184 bool skip_call = false;
7185 // First make sure sets being destroyed are not currently in-use
7186 for (uint32_t i = 0; i < count; ++i)
7187 skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
7188
Tobin Ehlisbd711bd2016-10-12 14:27:30 -06007189 DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
7190 if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
Tobin Ehlis2c763302016-05-26 13:30:45 -06007191 // Can't Free from a NON_FREE pool
7192 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
Tobin Ehlisbf98b692016-10-06 12:58:06 -06007193 reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
Tobin Ehlis2c763302016-05-26 13:30:45 -06007194 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
Tobin Ehlisbf98b692016-10-06 12:58:06 -06007195 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
7196 validation_error_map[VALIDATION_ERROR_00922]);
Tobin Ehlis2c763302016-05-26 13:30:45 -06007197 }
7198 return skip_call;
7199}
7200// Sets have been removed from the pool so update underlying state
7201static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
7202 const VkDescriptorSet *descriptor_sets) {
Tobin Ehlisbd711bd2016-10-12 14:27:30 -06007203    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    // PreCallValidateFreeDescriptorSets tolerates an unknown pool, so guard the
    // record step against a null pool_state as well rather than dereferencing it.
    if (!pool_state) return;
Tobin Ehlis2c763302016-05-26 13:30:45 -06007204    // Update available descriptor sets in pool
7205    pool_state->availableSets += count;
7206
7207 // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
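    // e.g. freeing a set whose layout declared 4 COMBINED_IMAGE_SAMPLER descriptors
    // returns 4 to that type's slot in availableDescriptorTypeCount.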
7208 for (uint32_t i = 0; i < count; ++i) {
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007209 auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
Tobin Ehlis2c763302016-05-26 13:30:45 -06007210 uint32_t type_index = 0, descriptor_count = 0;
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007211 for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
7212 type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
7213 descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
Tobin Ehlis2c763302016-05-26 13:30:45 -06007214 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
7215 }
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007216 freeDescriptorSet(dev_data, descriptor_set);
7217 pool_state->sets.erase(descriptor_set);
Tobin Ehlis2c763302016-05-26 13:30:45 -06007218 }
7219}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007220
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007221VKAPI_ATTR VkResult VKAPI_CALL
7222FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007223 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7224 // Make sure that no sets being destroyed are in-flight
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007225 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisfe871282016-06-28 10:28:02 -06007226 bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007227 lock.unlock();
Tobin Ehlis25e27ab2016-07-13 09:41:09 -06007228
Tobin Ehlisfe871282016-06-28 10:28:02 -06007229 if (skip_call)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007230 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13007231 VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007232 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007233 lock.lock();
Tobin Ehlis2c763302016-05-26 13:30:45 -06007234 PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007235 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007236 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007237 return result;
7238}
Tobin Ehlis300888c2016-05-18 13:43:26 -06007239// TODO : This is a Proof-of-concept for core validation architecture
7240// Really we'll want to break out these functions to separate files but
7241// keeping it all together here to prove out design
7242// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
7243static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7244 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7245 const VkCopyDescriptorSet *pDescriptorCopies) {
Chris Forbesa13fe522016-10-13 15:34:59 +13007246 if (dev_data->instance_data->disabled.update_descriptor_sets)
Tobin Ehlisc67108b2016-10-10 11:14:52 -06007247 return false;
Tobin Ehlis300888c2016-05-18 13:43:26 -06007248 // First thing to do is perform map look-ups.
7249    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
7250    // so we can't do a single map look-up up-front; the look-ups happen individually in the functions below
7251
7252 // Now make call(s) that validate state, but don't perform state updates in this function
7253    // Note that DescriptorSets are unique here in that we don't yet have class instances in hand. A helper function
7254    // in the cvdescriptorset namespace parses the params and makes the calls into the specific class instances.
Tobin Ehlis6a72dc72016-06-01 16:41:17 -06007255 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
7256 descriptorCopyCount, pDescriptorCopies);
Tobin Ehlis300888c2016-05-18 13:43:26 -06007257}
7258// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
7259static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7260 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7261 const VkCopyDescriptorSet *pDescriptorCopies) {
Tobin Ehlis6a72dc72016-06-01 16:41:17 -06007262 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
Tobin Ehlis300888c2016-05-18 13:43:26 -06007263 pDescriptorCopies);
7264}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007265
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007266VKAPI_ATTR void VKAPI_CALL
7267UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
7268 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
Tobin Ehlis300888c2016-05-18 13:43:26 -06007269    // The only map look-up at the top level is for the device-level layer_data
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007270 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007271 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis300888c2016-05-18 13:43:26 -06007272 bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7273 pDescriptorCopies);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007274 lock.unlock();
Tobin Ehlis300888c2016-05-18 13:43:26 -06007275 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13007276 dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7277 pDescriptorCopies);
Tobin Ehlis300888c2016-05-18 13:43:26 -06007278 lock.lock();
7279 // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
7280 PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7281 pDescriptorCopies);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007282 }
7283}
7284
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007285VKAPI_ATTR VkResult VKAPI_CALL
7286AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007287 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13007288 VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007289 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007290 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbesc25c8452016-06-21 14:32:00 +12007291 auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
7292
7293 if (pPool) {
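            // Track each new command buffer in two places: the owning pool's list
            // (so pool reset/destroy can clean it up) and the global commandBufferMap
            // (so per-CB state is reachable from a bare VkCommandBuffer handle).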
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007294 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007295 // Add command buffer to its commandPool map
Chris Forbesc25c8452016-06-21 14:32:00 +12007296 pPool->commandBuffers.push_back(pCommandBuffer[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007297 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
7298 // Add command buffer to map
7299 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
7300 resetCB(dev_data, pCommandBuffer[i]);
7301 pCB->createInfo = *pCreateInfo;
7302 pCB->device = device;
7303 }
7304 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007305 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007306 }
7307 return result;
7308}
7309
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -06007310// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
Tobin Ehlis04c04272016-10-12 11:54:09 -06007311static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
Tobin Ehlise5df29e2016-10-12 13:18:20 -06007312 addCommandBufferBinding(&fb_state->cb_bindings,
7313 {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
7314 cb_state);
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -06007315 for (auto attachment : fb_state->attachments) {
7316 auto view_state = attachment.view_state;
7317 if (view_state) {
Tobin Ehlis15b8ea02016-09-19 14:02:58 -06007318 AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -06007319 }
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -06007320 auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -06007321 if (rp_state) {
7322 addCommandBufferBinding(
7323 &rp_state->cb_bindings,
7324 {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7325 }
7326 }
7327}
7328
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007329VKAPI_ATTR VkResult VKAPI_CALL
7330BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007331 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007332 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007333 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007334 // Validate command buffer level
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007335 GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
7336 if (cb_node) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007337        // Begin implicitly resets the command buffer, so make sure any prior use has completed and then clear memory references
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06007338 if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007339 skip_call |=
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06007340 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007341 (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
Mike Weiblencce7ec72016-10-17 19:33:05 -06007342 "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
Dave Houlton197211a2016-12-23 15:26:29 -07007343                    "You must check the command buffer fence before this call. %s",
7344 commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007345 }
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007346 clear_cmd_buf_and_mem_references(dev_data, cb_node);
7347 if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007348 // Secondary Command Buffer
7349 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7350 if (!pInfo) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007351 skip_call |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007352 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007353 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
7354 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s",
7355 commandBuffer, validation_error_map[VALIDATION_ERROR_00106]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007356 } else {
7357 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
Tobin Ehlis8fc7df22017-01-04 09:21:17 -07007358 // Object_tracker makes sure these objects are valid
7359 assert(pInfo->renderPass);
7360 assert(pInfo->framebuffer);
7361 string errorString = "";
7362 auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
7363 if (framebuffer) {
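                        // "Compatible" render passes match in attachment count, formats,
                        // and sample counts; the full rule is implemented by
                        // verify_renderpass_compatibility().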
7364 if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
7365 !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
7366 getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
7367 errorString)) {
7368 // renderPass that framebuffer was created with must be compatible with local renderPass
7369 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7370 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7371 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
7372 "vkBeginCommandBuffer(): Secondary Command "
7373 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
7374 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
7375 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
7376 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
7377 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
7378 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007379 }
Tobin Ehlis8fc7df22017-01-04 09:21:17 -07007380 // Connect this framebuffer and its children to this cmdBuffer
7381 AddFramebufferBinding(dev_data, cb_node, framebuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007382 }
7383 }
Dave Houlton197211a2016-12-23 15:26:29 -07007384 if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007385 (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007386 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7387 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
Dave Houlton197211a2016-12-23 15:26:29 -07007388 __LINE__, VALIDATION_ERROR_00107, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007389 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
7390                                     "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
Dave Houlton197211a2016-12-23 15:26:29 -07007391 "support precise occlusion queries. %s",
7392 commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007393 }
7394 }
7395 if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -06007396 auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
Chris Forbes967c4682016-05-17 11:36:23 +12007397 if (renderPass) {
Chris Forbesef730462016-09-27 12:03:31 +13007398 if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007399 skip_call |= log_msg(
Dave Houlton197211a2016-12-23 15:26:29 -07007400 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7401 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7402 VALIDATION_ERROR_00111, "DS",
7403 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
7404 "that is less than the number of subpasses (%d). %s",
7405 commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
7406 validation_error_map[VALIDATION_ERROR_00111]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007407 }
7408 }
7409 }
7410 }
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007411 if (CB_RECORDING == cb_node->state) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007412 skip_call |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007413 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007414 (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07007415 "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
Dave Houlton197211a2016-12-23 15:26:29 -07007416 ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
7417 commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
Chris Forbese46e0a12016-12-20 11:33:11 +13007418 } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
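            // Begin on an already-recorded (or ended-then-invalidated) command buffer
            // performs an implicit reset, which is only legal when its pool was created
            // with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT.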
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007419 VkCommandPool cmdPool = cb_node->createInfo.commandPool;
Chris Forbesc25c8452016-06-21 14:32:00 +12007420 auto pPool = getCommandPoolNode(dev_data, cmdPool);
7421 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007422 skip_call |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007423 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007424 (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07007425 "Call to vkBeginCommandBuffer() on command buffer (0x%p"
Mark Muelleraab36502016-05-03 13:17:29 -06007426 ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
Dave Houlton197211a2016-12-23 15:26:29 -07007427 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7428 commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007429 }
7430 resetCB(dev_data, commandBuffer);
7431 }
7432 // Set updated state here in case implicit reset occurs above
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007433 cb_node->state = CB_RECORDING;
7434 cb_node->beginInfo = *pBeginInfo;
7435 if (cb_node->beginInfo.pInheritanceInfo) {
7436 cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7437 cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
Mark Young75c43592016-05-06 13:48:26 -06007438        // If this is a secondary command buffer that inherits state, update the items it should inherit.
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007439 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7440 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -06007441 cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007442 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
Tobin Ehlis3d916312016-11-03 07:26:28 -06007443 cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -06007444 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
Mark Young75c43592016-05-06 13:48:26 -06007445 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007446 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007447 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007448 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007449 if (skip_call) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007450 return VK_ERROR_VALIDATION_FAILED_EXT;
7451 }
Chris Forbesaaa9c282016-10-03 20:01:14 +13007452 VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
Tobin Ehlis4c522322016-04-11 16:39:29 -06007453
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007454 return result;
7455}
7456
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007457VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007458 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007459 VkResult result = VK_SUCCESS;
7460 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007461 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007462 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7463 if (pCB) {
Dave Houlton197211a2016-12-23 15:26:29 -07007464 if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
7465 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
Cody Northrop3bb4d962016-05-09 16:15:57 -06007466 // This needs spec clarification to update valid usage, see comments in PR:
7467 // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
Mike Weiblen6daea5b2016-12-19 20:41:58 -07007468 skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
Cody Northrop3bb4d962016-05-09 16:15:57 -06007469 }
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007470 skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7471 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007472 for (auto query : pCB->activeQueries) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007473 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007474 VALIDATION_ERROR_00124, "DS",
7475                                 "Ending command buffer with an in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
7476 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007477 }
7478 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06007479 if (!skip_call) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007480 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13007481 result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007482 lock.lock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007483 if (VK_SUCCESS == result) {
7484 pCB->state = CB_RECORDED;
7485 // Reset CB status flags
7486 pCB->status = 0;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007487 }
7488 } else {
7489 result = VK_ERROR_VALIDATION_FAILED_EXT;
7490 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007491 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007492 return result;
7493}
7494
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007495VKAPI_ATTR VkResult VKAPI_CALL
7496ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
Tobin Ehlis739d62a2016-04-14 12:22:03 -06007497 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007498 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007499 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007500 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7501 VkCommandPool cmdPool = pCB->createInfo.commandPool;
Chris Forbesc25c8452016-06-21 14:32:00 +12007502 auto pPool = getCommandPoolNode(dev_data, cmdPool);
7503 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
Tobin Ehlis739d62a2016-04-14 12:22:03 -06007504 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007505 (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07007506 "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
Dave Houlton197211a2016-12-23 15:26:29 -07007507 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
7508 commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007509 }
Tobin Ehlis9a9f7a22016-10-20 07:43:15 -06007510 skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007511 lock.unlock();
Tobin Ehlis739d62a2016-04-14 12:22:03 -06007512 if (skip_call)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007513 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13007514 VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007515 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007516 lock.lock();
Chris Forbese30fb982016-06-21 12:35:16 +12007517 dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007518 resetCB(dev_data, commandBuffer);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007519 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007520 }
7521 return result;
7522}
Mark Lobodzinski188b2302016-04-12 10:41:59 -06007523
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007524VKAPI_ATTR void VKAPI_CALL
7525CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007526 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007527 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007528 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007529 GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
7530 if (cb_state) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007531 skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7532 UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007533 if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
7534 skip |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007535 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7536 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -06007537 "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007538 (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007539 }
Dave Houlton197211a2016-12-23 15:26:29 -07007540 // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007541
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007542 PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
7543 if (pipe_state) {
7544 cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
7545 set_cb_pso_status(cb_state, pipe_state);
7546 set_pipeline_state(pipe_state);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007547 } else {
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007548 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07007549 (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
7550 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
7551 validation_error_map[VALIDATION_ERROR_00600]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007552 }
        // Only add bindings when pipe_state is valid; dereferencing a null
        // pipe_state after logging the error above would crash the layer.
        if (pipe_state) {
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
                }
            }
        }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007564 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007565 lock.unlock();
Tobin Ehlisd3d07462016-10-26 14:30:06 -06007566 if (!skip)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007567 dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007568}
7569
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007570VKAPI_ATTR void VKAPI_CALL
7571CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007572 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007573 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007574 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007575 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7576 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007577 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7578 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
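        // Record which viewport slots were set: ((1u << viewportCount) - 1u) builds a
        // run of viewportCount one-bits, shifted up to start at bit firstViewport.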
Chris Forbes5fc77832016-07-28 14:15:38 +12007579 pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007580 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007581 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007582 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007583 dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007584}
7585
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007586VKAPI_ATTR void VKAPI_CALL
7587CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007588 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007589 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007590 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007591 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7592 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007593 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7594 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
Chris Forbes5fc77832016-07-28 14:15:38 +12007595 pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007596 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007597 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007598 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007599 dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007600}
7601
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007602VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
Mark Young7394fdd2016-03-31 14:56:43 -06007603 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007604 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007605 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007606 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7607 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007608 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7609 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
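        // CBSTATUS_* bits record which pieces of dynamic state have been set on this
        // command buffer so draw-time validation can detect missing dynamic state.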
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007610 pCB->status |= CBSTATUS_LINE_WIDTH_SET;
Mark Young7394fdd2016-03-31 14:56:43 -06007611
Tobin Ehlis52c76a32016-10-12 09:05:51 -06007612 PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
Mark Young7394fdd2016-03-31 14:56:43 -06007613 if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7614 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007615 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
baldurk209ccda2016-05-05 16:31:05 +02007616 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007617                                 "flag. This is undefined behavior and the call may be ignored. %s",
7618 validation_error_map[VALIDATION_ERROR_01476]);
Mark Young7394fdd2016-03-31 14:56:43 -06007619 } else {
7620 skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7621 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007622 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007623 lock.unlock();
Mark Young7394fdd2016-03-31 14:56:43 -06007624 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007625 dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007626}
7627
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007628VKAPI_ATTR void VKAPI_CALL
7629CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007630 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007631 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007632 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007633 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7634 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007635 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7636 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007637 pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007638 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007639 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007640 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007641 dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007642}
7643
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007644VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007645 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007646 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007647 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007648 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7649 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007650 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7651 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06007652 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007653 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007654 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007655 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007656 dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007657}
7658
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007659VKAPI_ATTR void VKAPI_CALL
7660CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007661 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007662 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007663 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007664 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7665 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007666 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7667 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007668 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007669 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007670 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007671 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007672 dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007673}
7674
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007675VKAPI_ATTR void VKAPI_CALL
7676CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007677 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007678 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007679 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007680 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7681 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007682 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7683 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007684 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7685 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007686 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007687 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007688 dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007689}
7690
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007691VKAPI_ATTR void VKAPI_CALL
7692CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007693 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007694 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007695 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007696 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7697 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007698 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7699 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007700 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7701 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007702 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007703 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007704 dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007705}
7706
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007707VKAPI_ATTR void VKAPI_CALL
7708CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007709 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007710 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007711 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007712 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7713 if (pCB) {
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007714 skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7715 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007716 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7717 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007718 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007719 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007720 dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007721}
7722
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007723VKAPI_ATTR void VKAPI_CALL
7724CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7725 uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7726 const uint32_t *pDynamicOffsets) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007727 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007728 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007729 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007730 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7731 if (pCB) {
7732 if (pCB->state == CB_RECORDING) {
Tobin Ehlis285a8282016-03-17 13:37:40 -06007733 // Track total count of dynamic descriptor types to make sure we have an offset for each one
7734 uint32_t totalDynamicDescriptors = 0;
7735 string errorString = "";
7736 uint32_t lastSetIndex = firstSet + setCount - 1;
Chris Forbes0b03b932016-05-16 14:09:35 +12007737 if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007738 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
Chris Forbes0b03b932016-05-16 14:09:35 +12007739 pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7740 }
Tobin Ehlis09d16712016-05-17 10:41:55 -06007741 auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
Tobin Ehlisc1d9be12016-10-13 10:18:18 -06007742 auto pipeline_layout = getPipelineLayout(dev_data, layout);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007743 for (uint32_t i = 0; i < setCount; i++) {
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007744 cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[i]);
7745 if (descriptor_set) {
Tobin Ehlis0fc85672016-07-07 11:06:26 -06007746 pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007747 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = descriptor_set;
Tobin Ehlisfe871282016-06-28 10:28:02 -06007748 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7749 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
Mike Weiblencce7ec72016-10-17 19:33:05 -06007750 DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007751 (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007752 if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007753 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7754 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7755 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
Mike Weiblencce7ec72016-10-17 19:33:05 -06007756 "Descriptor Set 0x%" PRIxLEAST64
Tobin Ehlisfe871282016-06-28 10:28:02 -06007757 " bound but it was never updated. You may want to either update it or not bind it.",
7758 (uint64_t)pDescriptorSets[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007759 }
Tobin Ehlis285a8282016-03-17 13:37:40 -06007760 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007761 if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, i + firstSet, errorString)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007762 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7763 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007764 VALIDATION_ERROR_00974, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007765 "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007766 "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
7767 i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
7768 validation_error_map[VALIDATION_ERROR_00974]);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007769 }
Chris Forbes0b03b932016-05-16 14:09:35 +12007770
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007771 auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();
Chris Forbes0b03b932016-05-16 14:09:35 +12007772
7773 pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7774
7775 if (setDynamicDescriptorCount) {
Tobin Ehlis285a8282016-03-17 13:37:40 -06007776 // First make sure we won't overstep bounds of pDynamicOffsets array
Chris Forbes0b03b932016-05-16 14:09:35 +12007777 if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007778 skip_call |=
Tobin Ehlis285a8282016-03-17 13:37:40 -06007779 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7780 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7781 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -06007782 "descriptorSet #%u (0x%" PRIxLEAST64
Tobin Ehlis285a8282016-03-17 13:37:40 -06007783 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7784 "array. There must be one dynamic offset for each dynamic descriptor being bound.",
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007785 i, (uint64_t)pDescriptorSets[i], descriptor_set->GetDynamicDescriptorCount(),
Tobin Ehlis285a8282016-03-17 13:37:40 -06007786 (dynamicOffsetCount - totalDynamicDescriptors));
7787 } else { // Validate and store dynamic offsets with the set
7788 // Validate Dynamic Offset Minimums
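                            // Each dynamic uniform/storage buffer descriptor consumes one
                            // pDynamicOffsets entry, in binding order, and each offset must
                            // be a multiple of the matching device limit (e.g. with
                            // minUniformBufferOffsetAlignment == 256, only 0, 256, 512, ... pass).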
7789 uint32_t cur_dyn_offset = totalDynamicDescriptors;
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007790 for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
7791 if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
Tobin Ehlis285a8282016-03-17 13:37:40 -06007792 if (vk_safe_modulo(
7793 pDynamicOffsets[cur_dyn_offset],
Tobin Ehlise54be7b2016-04-11 14:49:55 -06007794 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007795 skip_call |= log_msg(
Tobin Ehlis285a8282016-03-17 13:37:40 -06007796 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007797 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
7798 "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7799 "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
Tobin Ehlis285a8282016-03-17 13:37:40 -06007800 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007801 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
7802 validation_error_map[VALIDATION_ERROR_00978]);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007803 }
7804 cur_dyn_offset++;
Tobin Ehlis1a0afbf2016-12-29 12:40:16 -07007805 } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
Tobin Ehlis285a8282016-03-17 13:37:40 -06007806 if (vk_safe_modulo(
7807 pDynamicOffsets[cur_dyn_offset],
Tobin Ehlise54be7b2016-04-11 14:49:55 -06007808 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007809 skip_call |= log_msg(
Tobin Ehlis285a8282016-03-17 13:37:40 -06007810 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007811 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
7812                                     "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7813 "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
Tobin Ehlis285a8282016-03-17 13:37:40 -06007814 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007815 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
7816 validation_error_map[VALIDATION_ERROR_00978]);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007817 }
7818 cur_dyn_offset++;
7819 }
7820 }
Chris Forbes0b03b932016-05-16 14:09:35 +12007821
7822 pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7823 std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7824 pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007825 // Keep running total of dynamic descriptor count to verify at the end
Chris Forbes0b03b932016-05-16 14:09:35 +12007826 totalDynamicDescriptors += setDynamicDescriptorCount;
7827
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007828 }
7829 }
Tobin Ehlis285a8282016-03-17 13:37:40 -06007830 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007831 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7832 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
Mike Weiblencce7ec72016-10-17 19:33:05 -06007833 DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
7834 " that doesn't exist!",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007835 (uint64_t)pDescriptorSets[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007836 }
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007837 skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7838 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007839 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7840 if (firstSet > 0) { // Check set #s below the first bound set
7841 for (uint32_t i = 0; i < firstSet; ++i) {
7842 if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
Tobin Ehlis09d16712016-05-17 10:41:55 -06007843 !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
Tobin Ehlis0fc85672016-07-07 11:06:26 -06007844 pipeline_layout, i, errorString)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007845 skip_call |= log_msg(
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007846 dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7847 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7848 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
Mike Weiblencce7ec72016-10-17 19:33:05 -06007849 "DescriptorSet 0x%" PRIxLEAST64
Mark Muelleraab36502016-05-03 13:17:29 -06007850 " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007851 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7852 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7853 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007854 }
7855 }
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007856 // Check if newly last bound set invalidates any remaining bound sets
7857 if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7858 if (oldFinalBoundSet &&
Tobin Ehlis0fc85672016-07-07 11:06:26 -06007859 !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
Tobin Ehlis09d16712016-05-17 10:41:55 -06007860 auto old_set = oldFinalBoundSet->GetSet();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007861 skip_call |=
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007862 log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
Tobin Ehlis09d16712016-05-17 10:41:55 -06007863 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
Mike Weiblencce7ec72016-10-17 19:33:05 -06007864 DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
Mark Muelleraab36502016-05-03 13:17:29 -06007865 " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007866 " newly bound as set #%u so set #%u and any subsequent sets were "
Mark Muelleraab36502016-05-03 13:17:29 -06007867 "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
Tobin Ehlis09d16712016-05-17 10:41:55 -06007868 reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
Tobin Ehlis223b01e2016-03-21 14:14:44 -06007869 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7870 lastSetIndex + 1, (uint64_t)layout);
7871 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7872 }
7873 }
Tobin Ehlis285a8282016-03-17 13:37:40 -06007874 }
7875 // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7876 if (totalDynamicDescriptors != dynamicOffsetCount) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007877 skip_call |=
7878 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007879 (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007880 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007881 "is %u. It should exactly match the number of dynamic descriptors. %s",
7882 setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007883 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007884 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007885 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007886 }
7887 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007888 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007889 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007890 dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7891 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007892}
7893
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007894VKAPI_ATTR void VKAPI_CALL
7895CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007896 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007897 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06007898    // TODO: Verify that index buffers have the correct usage flags set
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007899 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06007900
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007901 auto buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007902 auto cb_node = getCBNode(dev_data, commandBuffer);
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007903 if (cb_node && buffer_state) {
Tobin Ehlise1995fc2016-12-22 12:45:09 -07007904 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06007905 std::function<bool()> function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007906 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06007907 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007908 cb_node->validate_functions.push_back(function);
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007909 skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7910 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007911 VkDeviceSize offset_align = 0;
7912 switch (indexType) {
7913 case VK_INDEX_TYPE_UINT16:
7914 offset_align = 2;
7915 break;
7916 case VK_INDEX_TYPE_UINT32:
7917 offset_align = 4;
7918 break;
7919 default:
7920        // ParamChecker should catch a bad enum; the alignment check below also fires if offset_align stays 0
7921 break;
7922 }
7923 if (!offset_align || (offset % offset_align)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007924 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7925 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7926                             "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on the alignment boundary required by index type %s.",
7927 offset, string_VkIndexType(indexType));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007928 }
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007929 cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06007930 } else {
7931 assert(0);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007932 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007933 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007934 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007935 dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007936}
7937
7938void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7939 uint32_t end = firstBinding + bindingCount;
7940 if (pCB->currentDrawData.buffers.size() < end) {
7941 pCB->currentDrawData.buffers.resize(end);
7942 }
7943 for (uint32_t i = 0; i < bindingCount; ++i) {
7944 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7945 }
7946}
7947
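// Snapshot the current draw data so each draw records exactly the buffers that were bound when it was issued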
Dustin Graves8f1eab92016-04-05 09:41:17 -06007948static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007949
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007950VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7951 uint32_t bindingCount, const VkBuffer *pBuffers,
7952 const VkDeviceSize *pOffsets) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007953 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007954 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06007955    // TODO: Verify that vertex buffers have the correct usage flags set
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007956 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06007957
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007958 auto cb_node = getCBNode(dev_data, commandBuffer);
7959 if (cb_node) {
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06007960 for (uint32_t i = 0; i < bindingCount; ++i) {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007961 auto buffer_state = getBufferState(dev_data, pBuffers[i]);
7962 assert(buffer_state);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07007963 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06007964 std::function<bool()> function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007965 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06007966 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007967 cb_node->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007968 }
Tobin Ehlis1c883a02016-12-19 15:59:16 -07007969        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7970 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06007971 updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007972 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007973        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007974 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007975 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007976 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007977 dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007978}
7979
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07007980// Expects global_lock to be held by caller
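// Queue deferred callbacks that mark each storage image and buffer written by this command buffer as containing valid data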
Tobin Ehlis5558ecb2016-12-19 15:16:37 -07007981static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
Tobin Ehlis2e319d42016-03-25 11:49:51 -06007982 for (auto imageView : pCB->updateImages) {
Tobin Ehlis8b26a382016-09-14 08:02:49 -06007983 auto view_state = getImageViewState(dev_data, imageView);
7984 if (!view_state)
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007985 continue;
Tobin Ehlis5611e922016-06-28 15:52:55 -06007986
Tobin Ehlis30df15c2016-10-12 17:17:57 -06007987 auto image_state = getImageState(dev_data, view_state->create_info.image);
7988 assert(image_state);
Dustin Graves8f1eab92016-04-05 09:41:17 -06007989 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06007990 SetImageMemoryValid(dev_data, image_state, true);
Dustin Graves8f1eab92016-04-05 09:41:17 -06007991 return false;
Tobin Ehlis2e319d42016-03-25 11:49:51 -06007992 };
7993 pCB->validate_functions.push_back(function);
7994 }
7995 for (auto buffer : pCB->updateBuffers) {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007996 auto buffer_state = getBufferState(dev_data, buffer);
7997 assert(buffer_state);
Dustin Graves8f1eab92016-04-05 09:41:17 -06007998 std::function<bool()> function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07007999 SetBufferMemoryValid(dev_data, buffer_state, true);
Dustin Graves8f1eab92016-04-05 09:41:17 -06008000 return false;
Tobin Ehlis2e319d42016-03-25 11:49:51 -06008001 };
8002 pCB->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008003 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008004}
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008005
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008006// Generic function to handle validation for all CmdDraw* type functions
Tobin Ehlis022528b2016-12-29 12:22:32 -07008007static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
8008 CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
Jeremy Hayese2583052016-12-12 11:01:28 -07008009 UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008010 bool skip = false;
8011 *cb_state = getCBNode(dev_data, cmd_buffer);
8012 if (*cb_state) {
8013 skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
Jeremy Hayese2583052016-12-12 11:01:28 -07008014 skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
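        // Graphics draws must be recorded inside a render pass; compute dispatches must be recorded outside of one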
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008015 skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
8016 : insideRenderPass(dev_data, *cb_state, caller, msg_code);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008017 }
8018 return skip;
8019}
8020
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008021// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
Tobin Ehlis022528b2016-12-29 12:22:32 -07008022static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8023 CMD_TYPE cmd_type) {
8024 UpdateDrawState(dev_data, cb_state, bind_point);
Tobin Ehlis7afd7812016-12-28 14:24:47 -07008025 MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008026 UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
8027}
8028
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008029// Generic function to handle state update for all CmdDraw* type functions
Tobin Ehlis022528b2016-12-29 12:22:32 -07008030static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8031 CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
8032 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008033 updateResourceTrackingOnDraw(cb_state);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008034 cb_state->drawCount[draw_type]++;
8035}
8036
Tobin Ehlis022528b2016-12-29 12:22:32 -07008037static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
8038 GLOBAL_CB_NODE **cb_state, const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008039 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
8040 VALIDATION_ERROR_02203);
Tobin Ehlis232017e2016-12-21 10:28:54 -07008041}
8042
Tobin Ehlis022528b2016-12-29 12:22:32 -07008043static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8044 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
Tobin Ehlis18132402016-12-21 07:52:20 -07008045}
8046
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008047VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
8048 uint32_t firstVertex, uint32_t firstInstance) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008049 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis232017e2016-12-21 10:28:54 -07008050 GLOBAL_CB_NODE *cb_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008051 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis022528b2016-12-29 12:22:32 -07008052 bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008053 lock.unlock();
Tobin Ehlis232017e2016-12-21 10:28:54 -07008054 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008055 dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
Tobin Ehlis18132402016-12-21 07:52:20 -07008056 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008057 PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
Tobin Ehlis18132402016-12-21 07:52:20 -07008058 lock.unlock();
8059 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008060}
8061
Tobin Ehlis022528b2016-12-29 12:22:32 -07008062static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8063 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008064 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
8065 VALIDATION_ERROR_02216);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008066}
8067
Tobin Ehlis022528b2016-12-29 12:22:32 -07008068static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8069 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008070}
8071
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008072VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
8073 uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008074 uint32_t firstInstance) {
8075 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008076 GLOBAL_CB_NODE *cb_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008077 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008078 bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008079 "vkCmdDrawIndexed()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008080 lock.unlock();
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008081 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008082 dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008083 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008084 PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008085 lock.unlock();
8086 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008087}
8088
Tobin Ehlis022528b2016-12-29 12:22:32 -07008089static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8090 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
8091 const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008092 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
8093 VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008094 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008095 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008096 return skip;
8097}
8098
Tobin Ehlis022528b2016-12-29 12:22:32 -07008099static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8100 BUFFER_STATE *buffer_state) {
8101 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008102 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8103}
8104
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008105VKAPI_ATTR void VKAPI_CALL
8106CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008107 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008108 GLOBAL_CB_NODE *cb_state = nullptr;
8109 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008110 std::unique_lock<std::mutex> lock(global_lock);
Tony Barbour0725b0d2017-01-06 11:52:50 -07008111 bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008112 &buffer_state, "vkCmdDrawIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008113 lock.unlock();
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008114 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008115 dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008116 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008117 PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008118 lock.unlock();
8119 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008120}
8121
Tobin Ehlis022528b2016-12-29 12:22:32 -07008122static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8123 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8124 BUFFER_STATE **buffer_state, const char *caller) {
8125 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
Jeremy Hayese2583052016-12-12 11:01:28 -07008126 VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
Tobin Ehlis46132632016-12-21 12:22:11 -07008127 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008128 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
Tobin Ehlis46132632016-12-21 12:22:11 -07008129 return skip;
8130}
8131
Tobin Ehlis022528b2016-12-29 12:22:32 -07008132static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8133 BUFFER_STATE *buffer_state) {
8134 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
Tobin Ehlis46132632016-12-21 12:22:11 -07008135 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8136}
8137
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008138VKAPI_ATTR void VKAPI_CALL
8139CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008140 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis46132632016-12-21 12:22:11 -07008141 GLOBAL_CB_NODE *cb_state = nullptr;
8142 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008143 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis46132632016-12-21 12:22:11 -07008144 bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008145 &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008146 lock.unlock();
Tobin Ehlis46132632016-12-21 12:22:11 -07008147 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008148 dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
Tobin Ehlis46132632016-12-21 12:22:11 -07008149 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008150 PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
Tobin Ehlis46132632016-12-21 12:22:11 -07008151 lock.unlock();
8152 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008153}
8154
Tobin Ehlis022528b2016-12-29 12:22:32 -07008155static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8156 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008157 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
8158 VALIDATION_ERROR_UNDEFINED);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008159}
8160
Tobin Ehlis022528b2016-12-29 12:22:32 -07008161static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8162 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008163}
8164
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008165VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008166 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008167 GLOBAL_CB_NODE *cb_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008168 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis022528b2016-12-29 12:22:32 -07008169 bool skip =
8170 PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008171 lock.unlock();
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008172 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008173 dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008174 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008175 PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008176 lock.unlock();
8177 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008178}
8179
Tobin Ehlis022528b2016-12-29 12:22:32 -07008180static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8181 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8182 BUFFER_STATE **buffer_state, const char *caller) {
8183 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
Jeremy Hayese2583052016-12-12 11:01:28 -07008184 VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008185 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008186 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008187 return skip;
8188}
8189
Tobin Ehlis022528b2016-12-29 12:22:32 -07008190static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8191 BUFFER_STATE *buffer_state) {
8192 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008193 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8194}
8195
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008196VKAPI_ATTR void VKAPI_CALL
8197CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008198 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008199 GLOBAL_CB_NODE *cb_state = nullptr;
8200 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008201 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis205f0032016-12-29 11:39:10 -07008202 bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008203 &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008204 lock.unlock();
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008205 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008206 dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008207 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008208 PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008209 lock.unlock();
8210 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008211}
8212
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008213VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
8214 uint32_t regionCount, const VkBufferCopy *pRegions) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06008215 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008216 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008217 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008218
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008219 auto cb_node = getCBNode(dev_data, commandBuffer);
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008220 auto src_buff_state = getBufferState(dev_data, srcBuffer);
8221 auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8222 if (cb_node && src_buff_state && dst_buff_state) {
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008223 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
8224 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008225 // Update bindings between buffers and cmd buffer
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008226 AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8227 AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008228 // Validate that SRC & DST buffers have correct usage flags set
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008229 skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07008230 VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008231 skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07008232 VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008233
8234 std::function<bool()> function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008235 return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008236 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008237 cb_node->validate_functions.push_back(function);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008238 function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008239 SetBufferMemoryValid(dev_data, dst_buff_state, true);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008240 return false;
8241 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008242 cb_node->validate_functions.push_back(function);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008243
Tobin Ehlis1c883a02016-12-19 15:59:16 -07008244 skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
8245 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
Mike Weiblen6daea5b2016-12-19 20:41:58 -07008246 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008247 } else {
8248 // Param_checker will flag errors on invalid objects, just assert here as debugging aid
8249        // ParamChecker will flag errors on invalid objects, just assert here as a debugging aid
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008250 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008251 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06008252 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13008253 dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008254}
8255
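// Verify that each copied subresource of srcImage is tracked at srcImageLayout, and that the layout permits transfer reads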
Tobin Ehlis67883822016-07-06 11:23:05 -06008256static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07008257 VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout,
8258 UNIQUE_VALIDATION_ERROR_CODE msgCode) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06008259 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008260
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008261 for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8262 uint32_t layer = i + subLayers.baseArrayLayer;
8263 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8264 IMAGE_CMD_BUF_LAYOUT_NODE node;
Tobin Ehlis67883822016-07-06 11:23:05 -06008265 if (!FindLayout(cb_node, srcImage, sub, node)) {
8266 SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008267 continue;
8268 }
8269 if (node.layout != srcImageLayout) {
8270 // TODO: Improve log message in the next pass
8271 skip_call |=
8272 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8273                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose srcImageLayout is %s "
8274                            "but whose current (tracked) layout is %s.",
8275 string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
8276 }
8277 }
8278 if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
8279 if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
Tobin Ehlis4d686262016-07-06 11:32:12 -06008280            // TODO: Can the image state be passed down from the top of the call tree to avoid this map look-up?
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008281 auto image_state = getImageState(dev_data, srcImage);
8282 if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
Tobin Ehlis4d686262016-07-06 11:32:12 -06008283 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8284 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8285 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8286 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
8287 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008288 } else {
8289 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07008290 msgCode, "DS", "Layout for input image is %s but can only be TRANSFER_SRC_OPTIMAL or GENERAL. %s",
8291 string_VkImageLayout(srcImageLayout), validation_error_map[msgCode]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008292 }
8293 }
8294 return skip_call;
8295}
8296
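// Verify that each copied subresource of destImage is tracked at destImageLayout, and that the layout permits transfer writes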
Tobin Ehlis67883822016-07-06 11:23:05 -06008297static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07008298 VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout,
8299 UNIQUE_VALIDATION_ERROR_CODE msgCode) {
Dustin Graves8f1eab92016-04-05 09:41:17 -06008300 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008301
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008302 for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8303 uint32_t layer = i + subLayers.baseArrayLayer;
8304 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8305 IMAGE_CMD_BUF_LAYOUT_NODE node;
Tobin Ehlis67883822016-07-06 11:23:05 -06008306 if (!FindLayout(cb_node, destImage, sub, node)) {
8307 SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008308 continue;
8309 }
8310 if (node.layout != destImageLayout) {
8311 skip_call |=
8312 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8313                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dstImageLayout is %s "
8314                            "but whose current (tracked) layout is %s.",
8315 string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
8316 }
8317 }
8318 if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8319 if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008320 auto image_state = getImageState(dev_data, destImage);
8321 if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
Tobin Ehlis4d686262016-07-06 11:32:12 -06008322 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8323 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8324 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8325 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
8326 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008327 } else {
8328 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mike Weiblen1dae96f2016-12-23 14:00:22 -07008329 msgCode, "DS", "Layout for output image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL. %s",
8330 string_VkImageLayout(destImageLayout), validation_error_map[msgCode]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008331 }
8332 }
8333 return skip_call;
8334}
8335
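// Verify that an image being cleared is tracked at dest_image_layout, and that the layout permits transfer writes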
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008336static bool VerifyClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
Cort0ebd1082016-12-08 09:59:43 -08008337 VkImageLayout dest_image_layout, const char *func_name) {
8338 bool skip = false;
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008339
8340 VkImageSubresourceRange resolvedRange = range;
8341 ResolveRemainingLevelsLayers(dev_data, &resolvedRange, image);
8342
Cort0ebd1082016-12-08 09:59:43 -08008343 if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8344 if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
Cort830c7c12016-12-05 17:33:49 -08008345 auto image_state = getImageState(dev_data, image);
8346 if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
8347 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
Cort0ebd1082016-12-08 09:59:43 -08008348 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8349 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8350 "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
Cort830c7c12016-12-05 17:33:49 -08008351 }
8352 } else {
Cort0ebd1082016-12-08 09:59:43 -08008353 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01086;
8354 if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
Cort830c7c12016-12-05 17:33:49 -08008355 error_code = VALIDATION_ERROR_01101;
8356 } else {
Cort0ebd1082016-12-08 09:59:43 -08008357 assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
Cort830c7c12016-12-05 17:33:49 -08008358 }
Cort0ebd1082016-12-08 09:59:43 -08008359 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8360 error_code, "DS", "%s: Layout for cleared image is %s but can only be "
8361 "TRANSFER_DST_OPTIMAL or GENERAL. %s",
8362 func_name, string_VkImageLayout(dest_image_layout), validation_error_map[error_code]);
Cort830c7c12016-12-05 17:33:49 -08008363 }
8364 }
8365
Karl Schultz537d8c22016-12-09 16:34:16 -07008366 for (uint32_t levelIdx = 0; levelIdx < resolvedRange.levelCount; ++levelIdx) {
8367 uint32_t level = levelIdx + resolvedRange.baseMipLevel;
8368 for (uint32_t layerIdx = 0; layerIdx < resolvedRange.layerCount; ++layerIdx) {
8369 uint32_t layer = layerIdx + resolvedRange.baseArrayLayer;
8370 VkImageSubresource sub = {resolvedRange.aspectMask, level, layer};
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008371 IMAGE_CMD_BUF_LAYOUT_NODE node;
8372 if (!FindLayout(cb_node, image, sub, node)) {
Cort0ebd1082016-12-08 09:59:43 -08008373 SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008374 continue;
8375 }
Cort0ebd1082016-12-08 09:59:43 -08008376 if (node.layout != dest_image_layout) {
8377 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01085;
8378 if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
Cort830c7c12016-12-05 17:33:49 -08008379 error_code = VALIDATION_ERROR_01100;
8380 } else {
Cort0ebd1082016-12-08 09:59:43 -08008381 assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
Cort830c7c12016-12-05 17:33:49 -08008382 }
Cort0ebd1082016-12-08 09:59:43 -08008383 skip |=
8384 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8385 __LINE__, error_code, "DS", "%s: Cannot clear an image whose layout is %s and "
8386 "doesn't match the current layout %s. %s",
8387 func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout),
8388 validation_error_map[error_code]);
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008389 }
8390 }
8391 }
8392
Cort0ebd1082016-12-08 09:59:43 -08008393 return skip;
Slawomir Cygan4f73b7f2016-11-28 19:17:38 +01008394}
8395
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008396// Test if two VkExtent3D structs are equivalent
8397static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
8398 bool result = true;
8399 if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
8400 (extent->depth != other_extent->depth)) {
8401 result = false;
8402 }
8403 return result;
8404}
8405
8406// Returns the image extent of a specific subresource.
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008407static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008408 const uint32_t mip = subresource->mipLevel;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008409 VkExtent3D extent = img->createInfo.extent;
Gregory Mitranoc7302232016-09-18 23:48:29 -04008410 extent.width = std::max(1U, extent.width >> mip);
8411 extent.height = std::max(1U, extent.height >> mip);
8412 extent.depth = std::max(1U, extent.depth >> mip);
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008413 return extent;
8414}
8415
8416// Test if the extent argument has all dimensions set to 0.
8417static inline bool IsExtentZero(const VkExtent3D *extent) {
8418 return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
8419}
8420
8421// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
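// e.g., a BC1-compressed image (4x4 texel blocks) with queue family granularity (1, 1, 1) yields a scaled granularity of (4, 4, 1)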
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008422static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008423 // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
8424    VkExtent3D granularity = {0, 0, 0};
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008425 auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
8426 if (pPool) {
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008427 granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
8428 if (vk_format_is_compressed(img->createInfo.format)) {
8429 auto block_size = vk_format_compressed_block_size(img->createInfo.format);
8430 granularity.width *= block_size.width;
8431 granularity.height *= block_size.height;
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008432 }
8433 }
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008434 return granularity;
8435}
8436
8437// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
8438static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
8439 bool valid = true;
8440 if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
8441 (vk_safe_modulo(extent->height, granularity->height) != 0)) {
8442 valid = false;
8443 }
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008444 return valid;
8445}
8446
8447// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008448static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
8449 const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008450 bool skip = false;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008451 VkExtent3D offset_extent = {};
8452 offset_extent.width = static_cast<uint32_t>(abs(offset->x));
8453 offset_extent.height = static_cast<uint32_t>(abs(offset->y));
8454 offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
8455 if (IsExtentZero(granularity)) {
8456 // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
8457 if (IsExtentZero(&offset_extent) == false) {
8458 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8459 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8460 "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
8461 "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8462 function, i, member, offset->x, offset->y, offset->z);
8463 }
8464 } else {
8465 // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
8466 // integer multiples of the image transfer granularity.
8467 if (IsExtentAligned(&offset_extent, granularity) == false) {
8468 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8469 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8470 "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
8471 "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
8472 function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
8473 granularity->depth);
8474 }
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008475 }
8476 return skip;
8477}
8478
8479// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008480static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8481 const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8482 const uint32_t i, const char *function, const char *member) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008483 bool skip = false;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008484 if (IsExtentZero(granularity)) {
8485 // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8486 // subresource extent.
8487 if (IsExtentEqual(extent, subresource_extent) == false) {
8488 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8489 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8490 "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8491 "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8492 function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8493 subresource_extent->height, subresource_extent->depth);
8494 }
8495 } else {
8496 // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
8497 // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
8498 // subresource extent dimensions.
8499 VkExtent3D offset_extent_sum = {};
8500 offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
8501 offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
8502 offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
8503 if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
8504 skip |=
8505 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008506 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008507 "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
8508 "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8509 "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8510 function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8511 granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8512 subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8513 }
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008514 }
8515 return skip;
8516}
8517
8518// Check a uint32_t width/height or stride value against the corresponding queue family Image Transfer Granularity value
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008519static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8520 const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008521 bool skip = false;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008522 if (vk_safe_modulo(value, granularity) != 0) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008523 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8524 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008525 "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
8526                        "transfer granularity (%d).",
8527 function, i, member, value, granularity);
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008528 }
8529 return skip;
8530}
8531
8532// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008533static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8534 const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008535 bool skip = false;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008536 if (vk_safe_modulo(value, granularity) != 0) {
8537 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8538 DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8539 "%s: pRegion[%d].%s (%" PRIdLEAST64
8540 ") must be an even integer multiple of this command buffer's queue family image transfer "
8541 "granularity width (%d).",
8542 function, i, member, value, granularity);
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008543 }
8544 return skip;
8545}
8546
8547// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8548static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008549 const IMAGE_STATE *img, const VkImageCopy *region,
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008550 const uint32_t i, const char *function) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008551 bool skip = false;
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008552 VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8553 skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8554 skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8555 VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8556 skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8557 "extent");
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008558 return skip;
8559}
8560
8561// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8562static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
Tobin Ehlis30df15c2016-10-12 17:17:57 -06008563 const IMAGE_STATE *img, const VkBufferImageCopy *region,
Gregory Mitranoceb36d92016-09-18 21:54:49 -04008564 const uint32_t i, const char *function) {
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008565 bool skip = false;
Mark Lobodzinskia4937292016-12-09 11:20:23 -07008566 if (vk_format_is_compressed(img->createInfo.format) == true) {
8567 // TODO: Add granularity checking for compressed formats
8568
8569 // bufferRowLength must be a multiple of the compressed texel block width
8570 // bufferImageHeight must be a multiple of the compressed texel block height
8571 // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
8572 // bufferOffset must be a multiple of the compressed texel block size in bytes
8573 // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
8574 // must equal the image subresource width
8575 // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
8576 // must equal the image subresource height
8577 // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
8578 // must equal the image subresource depth
8579 } else {
8580 VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8581 skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8582 skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8583 skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8584 skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8585 VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8586 skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8587 function, "imageExtent");
8588 }
Mark Lobodzinski5b6d39e2016-08-22 09:21:03 -06008589 return skip;
8590}
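
// One possible shape for the compressed-format checks described in the TODO above, kept here only as a
// sketch: the block-extent and block-size helpers named below are hypothetical, and the CheckItg* error
// messages would need rewording (they currently talk about transfer granularity, not texel blocks):
//
//   VkExtent3D block = vk_format_compressed_block_extent(img->createInfo.format);      // hypothetical helper
//   VkDeviceSize block_size = vk_format_compressed_block_size(img->createInfo.format); // hypothetical helper
//   skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, block.width, i, function, "bufferRowLength");
//   skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, block.height, i, function, "bufferImageHeight");
//   skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &block, i, function, "imageOffset");
//   skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, block_size, i, function, "bufferOffset");
//   // ...plus the imageExtent checks, which also pass when offset + extent reaches the subresource edge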

VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01180);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01183);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                                            const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       reinterpret_cast<uint64_t &>(image_state->image), 0, msgCode, "DS",
                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
                       reinterpret_cast<uint64_t &>(image_state->image),
                       string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count),
                       validation_error_map[msgCode]);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
                                              VALIDATION_ERROR_02194);
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
                                              VALIDATION_ERROR_02195);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02539);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02540);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BLITIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()", VALIDATION_ERROR_01300);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_buff_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        skip_call |=
            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01234);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i],
                                                                                i, "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_image_state && dst_buff_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
        // Update bindings between buffer/image and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01251);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i],
                                                                                i, "vkCmdCopyImageToBuffer()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
    if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
        (sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height))
        return false;
    return true;
}
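
// Example: with a render area at offset (0,0) and extent 1024x768, a clear rect at offset (256,256) with
// extent 512x512 is contained (it ends exactly at y == 768, and the comparisons use strict >, so a sub_rect
// may share the enclosing rect's right/bottom boundary). The same rect at offset (768,512) would reach
// (1280,1024) and is rejected.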

bool PreCallValidateCmdClearAttachments(layer_data *dev_data, VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                        const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) {
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    bool skip = false;
    if (cb_node) {
        skip |= ValidateCmd(dev_data, cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARATTACHMENTS);
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(cb_node) && (rectCount > 0) &&
            (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), 0,
                            DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                            "vkCmdClearAttachments() issued on command buffer object 0x%p prior to any Draw Cmds."
                            " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                            commandBuffer);
        }
        skip |= outsideRenderPass(dev_data, cb_node, "vkCmdClearAttachments()", VALIDATION_ERROR_01122);
    }

    // Validate that attachment is in reference list of active subpass
    if (cb_node && cb_node->activeRenderPass) {
        const VkRenderPassCreateInfo *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
        const VkSubpassDescription *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
        auto framebuffer = getFramebufferState(dev_data, cb_node->activeFramebuffer);

        for (uint32_t i = 0; i < attachmentCount; i++) {
            auto clear_desc = &pAttachments[i];
            VkImageView image_view = VK_NULL_HANDLE;

            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (clear_desc->colorAttachment >= subpass_desc->colorAttachmentCount) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
                        clear_desc->colorAttachment, cb_node->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
                } else if (subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                    DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                                    "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
                                    clear_desc->colorAttachment);
                } else {
                    image_view = framebuffer->createInfo
                                     .pAttachments[subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment];
                }
            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!subpass_desc->pDepthStencilAttachment ||  // Says no DS will be used in active subpass
                    (subpass_desc->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) {  // Says no DS will be used in active subpass

                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                        DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                } else {
                    image_view = framebuffer->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment];
                }
            }

            if (image_view) {
                auto image_view_state = getImageViewState(dev_data, image_view);
                for (uint32_t j = 0; j < rectCount; j++) {
                    // The rectangular region specified by a given element of pRects must be contained within the render area of
                    // the current render pass instance
                    if (false == ContainsRect(cb_node->activeRenderPassBeginInfo.renderArea, pRects[j].rect)) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, VALIDATION_ERROR_01115, "DS",
                                        "vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
                                        "the current render pass instance. %s",
                                        j, validation_error_map[VALIDATION_ERROR_01115]);
                    }
                    // The layers specified by a given element of pRects must be contained within every attachment that
                    // pAttachments refers to
                    auto attachment_base_array_layer = image_view_state->create_info.subresourceRange.baseArrayLayer;
                    auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
                    if ((pRects[j].baseArrayLayer < attachment_base_array_layer) ||
                        (pRects[j].layerCount > attachment_layer_count)) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, VALIDATION_ERROR_01116, "DS",
                                        "vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the "
                                        "layers of pAttachment[%d]. %s",
                                        j, i, validation_error_map[VALIDATION_ERROR_01116]);
                    }
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
    }
    if (!skip)
        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()", VALIDATION_ERROR_02527);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", VALIDATION_ERROR_01096);
    } else {
        assert(0);
    }
    for (uint32_t i = 0; i < rangeCount; ++i) {
        skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearColorImage()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_02528);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_01111);
    } else {
        assert(0);
    }
    for (uint32_t i = 0; i < rangeCount; ++i) {
        skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02541);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02542);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_RESOLVEIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()", VALIDATION_ERROR_01335);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
}

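// Note: setEventStageMask below is a deferred update, not executed at record time. CmdSetEvent and
// CmdResetEvent bind it into pCB->eventUpdates (via std::bind), and it runs at queue submit time so the
// event's stage mask is tracked on both the command buffer and the submitting queue.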
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
        skip_call |=
            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_00230, VALIDATION_ERROR_00231);
        auto event_state = getEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
        skip_call |=
            ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_00240, VALIDATION_ERROR_00241);
        auto event_state = getEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        // TODO : Add check for VALIDATION_ERROR_00226
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}

static bool TransitionImageAspectLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
                                        uint32_t level, uint32_t layer, VkImageAspectFlags aspect) {
    if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
        return false;
    }
    VkImageSubresource sub = {aspect, level, layer};
    IMAGE_CMD_BUF_LAYOUT_NODE node;
    if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
        SetLayout(pCB, mem_barrier->image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
        return false;
    }
    bool skip = false;
    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        // TODO: Set memory invalid which is in mem_tracker currently
    } else if (node.layout != mem_barrier->oldLayout) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "You cannot transition the layout of aspect %d from %s when current layout is %s.", aspect,
                        string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
    }
    SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
    return skip;
}

// TODO: Separate validation and layout state updates
static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
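
// Example (assuming the usual vk_enum_string_helper output for string_VkAccessFlagBits): passing
// (VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT) yields
// "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]", and passing 0 yields "[None]".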

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS",
                        "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must contain at least one of access bits %d %s when layout is %s, unless "
                                 "the app has previously added a barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must have required access bit %d %s %s when layout is %s, unless the app "
                                 "has previously added a barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT |
                                          VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                      type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS",
                        "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}
9338
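// Editor's illustrative sketch (not part of the layer source): an image barrier whose access masks satisfy
// ValidateMaskBitsFromLayouts() above for a TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL transition.
// The cmd_buf and image handles are assumed to exist in the surrounding application code.
#if 0
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // required bit for the old TRANSFER_DST_OPTIMAL layout
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;     // one of the optional bits for SHADER_READ_ONLY_OPTIMAL
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr,
                     1, &barrier);
#endif
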
static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                             "with no self dependency specified.",
                            funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = getImageState(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
                                    " queueFamilies created for this device.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
                                    dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            }
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                                 "PREINITIALIZED.",
                                funcName);
            }
            auto image_data = getImageState(dev_data, mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format = swapchain_data->createInfo.imageFormat;
                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                skip |= ValidateImageAspectMask(dev_data, image_data->image, format, aspect_mask, funcName);
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
                                                                               "baseArrayLayer (%d) and layerCount (%d) be less "
                                                                               "than or equal to the total number of layers (%d).",
                                    funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                    mem_barrier->subresourceRange.layerCount, arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
                                                         "(%d) and levelCount (%d) be less than or equal to "
                                                         "the total number of levels (%d).",
                        funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
        if (buffer_state) {
            auto buffer_size = buffer_state->requirements.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
                                                                 " which is not less than total size 0x%" PRIx64 ".",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip;
}

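// Editor's illustrative sketch (not part of the layer source): the queue family index rules that
// ValidateBarriers() enforces. For a VK_SHARING_MODE_CONCURRENT image both indices must be
// VK_QUEUE_FAMILY_IGNORED; for VK_SHARING_MODE_EXCLUSIVE they must either both be ignored or both name
// valid families. The family indices and the exclusive_image handle below are assumptions for the sketch.
#if 0
VkImageMemoryBarrier release = {};
release.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
release.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
release.dstAccessMask = 0;
release.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
release.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;  // same layout: only ownership is transferred
release.srcQueueFamilyIndex = 1;  // transfer queue family (assumed valid index)
release.dstQueueFamilyIndex = 0;  // graphics queue family (assumed valid index)
release.image = exclusive_image;  // created with VK_SHARING_MODE_EXCLUSIVE
release.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
// Both indices are set and both are below the device's queue family count, so the
// EXCLUSIVE-mode checks above pass.
#endif
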
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = getEventNode(dev_data, event);
            if (!global_event_data) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00254, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                           "using srcStageMask 0x%X which must be the bitwise "
                                                           "OR of the stageMask parameters used in calls to "
                                                           "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                                                           "used with vkSetEvent but instead is 0x%X. %s",
                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
    }
    return skip_call;
}

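// Editor's illustrative sketch (not part of the layer source): validateEventStageMask() requires the
// srcStageMask handed to vkCmdWaitEvents to equal the OR of the stageMask values the events were set with
// (optionally plus VK_PIPELINE_STAGE_HOST_BIT for events signaled from the host with vkSetEvent).
// cmd_buf, event_a, and event_b are assumed handles.
#if 0
vkCmdSetEvent(cmd_buf, event_a, VK_PIPELINE_STAGE_TRANSFER_BIT);
vkCmdSetEvent(cmd_buf, event_b, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
VkEvent events[2] = {event_a, event_b};
// srcStageMask is exactly TRANSFER | COMPUTE, the OR of the two set masks, so the check passes.
vkCmdWaitEvents(cmd_buf, 2, events, VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
#endif
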
// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
                            "%s(): %s flag %s is not compatible with the queue family properties of this "
                            "command buffer. %s",
                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
                            validation_error_map[error_code]);
            }
        }
    }
    return skip;
}

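// Editor's illustrative sketch (not part of the layer source): how the table above drives
// CheckStageMaskQueueCompatibility(). compute_cmd_buf is an assumed command buffer allocated from a pool
// whose queue family advertises only VK_QUEUE_COMPUTE_BIT.
#if 0
vkCmdPipelineBarrier(compute_cmd_buf,
                     VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,  // maps to VK_QUEUE_COMPUTE_BIT: compatible
                     VK_PIPELINE_STAGE_TRANSFER_BIT,        // transfer is allowed on compute queues: compatible
                     0, 0, nullptr, 0, nullptr, 0, nullptr);
// Substituting VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT would trigger the error above, since that stage
// maps to VK_QUEUE_GRAPHICS_BIT only.
#endif
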
bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
                                                           VALIDATION_ERROR_02510);
        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02067,
                                             VALIDATION_ERROR_02069);
        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_02068,
                                             VALIDATION_ERROR_02070);
        auto first_event_index = cb_state->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            auto event_state = getEventNode(dev_data, pEvents[i]);
            if (event_state) {
                addCommandBufferBinding(&event_state->cb_bindings,
                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
                                        cb_state);
                event_state->cb_bindings.insert(cb_state);
            }
            cb_state->waitedEvents.insert(pEvents[i]);
            cb_state->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> event_update =
            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
        cb_state->eventUpdates.push_back(event_update);
        if (cb_state->state == CB_RECORDING) {
            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
        } else {
            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip |= ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
                                                           VALIDATION_ERROR_02513);
        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skip |= ValidateStageMaskGsTsEnables(dev_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00265,
                                             VALIDATION_ERROR_00267);
        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_00266,
                                             VALIDATION_ERROR_00268);
        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip |= ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers,
                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
        } else {
            pCB->activeQueries.erase(query);
        }
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
            pCB->queryUpdates.push_back(queryUpdate);
        }
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(queryUpdate);
        if (cb_node->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                         stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
                             validation_error_map[VALIDATION_ERROR_00996]);
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
    // contained in the pipeline ranges.
    // Build a {start, end} span list for ranges with matching stage flags.
    const auto &ranges = pipeline_layout->push_constant_ranges;
    struct span {
        uint32_t start;
        uint32_t end;
    };
    std::vector<span> spans;
    spans.reserve(ranges.size());
    for (const auto &iter : ranges) {
        if (iter.stageFlags == stageFlags) {
            spans.push_back({iter.offset, iter.offset + iter.size});
        }
    }
    if (spans.size() == 0) {
        // There were no ranges that matched the stageFlags.
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
    } else {
        // Sort span list by start value.
        struct comparer {
            bool operator()(struct span i, struct span j) { return i.start < j.start; }
        } my_comparer;
        std::sort(spans.begin(), spans.end(), my_comparer);

        // Examine two spans at a time.
        std::vector<span>::iterator current = spans.begin();
        std::vector<span>::iterator next = current + 1;
        while (next != spans.end()) {
            if (current->end < next->start) {
                // There is a gap; cannot coalesce. Move to the next two spans.
                ++current;
                ++next;
            } else {
                // Coalesce the two spans. The start of the next span
                // is within the current span, so pick the larger of
                // the end values to extend the current span.
                // Then delete the next span and set next to the span after it.
                current->end = max(current->end, next->end);
                next = spans.erase(next);
            }
        }

        // Now we can check if the incoming range is within any of the spans.
        bool contained_in_a_range = false;
        for (uint32_t i = 0; i < spans.size(); ++i) {
            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                contained_in_a_range = true;
                break;
            }
        }
        if (!contained_in_a_range) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
                                              "with stageFlags = 0x%" PRIx32 " "
                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
                offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

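// Editor's illustrative sketch (not part of the layer source): the coalescing logic above accepts an update
// spanning two adjacent ranges that share the same stageFlags. Offsets, sizes, and handles are assumptions.
#if 0
VkPushConstantRange ranges[2] = {
    {VK_SHADER_STAGE_VERTEX_BIT, 0, 16},   // covers [0, 16)
    {VK_SHADER_STAGE_VERTEX_BIT, 16, 16},  // covers [16, 32); adjacent and flag-matching, coalesces to [0, 32)
};
// Against a pipeline layout built with these two ranges:
float data[8] = {};
vkCmdPushConstants(cmd_buf, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 32, data);  // inside [0, 32): OK
// The same update with stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT would hit VALIDATION_ERROR_00988,
// since no range carries those flags.
#endif
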
VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

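// Editor's illustrative sketch (not part of the layer source): vkCmdWriteTimestamp marks its query slot
// available at submit time through setQueryState() above. A common GPU-timing pattern (handles assumed):
#if 0
vkCmdResetQueryPool(cmd_buf, ts_pool, 0, 2);
vkCmdWriteTimestamp(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, ts_pool, 0);
// ... work being timed ...
vkCmdWriteTimestamp(cmd_buf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, ts_pool, 1);
// After retrieving t0 and t1, elapsed nanoseconds = (t1 - t0) * VkPhysicalDeviceLimits::timestampPeriod.
#endif
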
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = getImageViewState(dev_data, *image_view);
                if (view_state) {
                    const VkImageCreateInfo *ici = &getImageState(dev_data, view_state->create_info.image)->createInfo;
                    if (ici != nullptr) {
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
                                                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s). %s",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
                                                 validation_error_map[error_code]);
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

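// Editor's illustrative sketch (not part of the layer source): an image used as a framebuffer color
// attachment needs VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT so MatchUsage() above passes, and the framebuffer
// must respect the VkFramebufferCreateInfo rules enumerated below. Formats, extents, and the render_pass
// and color_view handles are assumptions for the sketch.
#if 0
VkImageCreateInfo img_info = {};
img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
img_info.imageType = VK_IMAGE_TYPE_2D;
img_info.format = VK_FORMAT_B8G8R8A8_UNORM;    // must match the render pass attachment format
img_info.extent = {1024, 768, 1};              // at least as large as the framebuffer below
img_info.mipLevels = 1;                        // attachment views must have a single mip level
img_info.arrayLayers = 1;
img_info.samples = VK_SAMPLE_COUNT_1_BIT;      // must match the render pass attachment sample count
img_info.tiling = VK_IMAGE_TILING_OPTIMAL;
img_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;  // the usage bit MatchUsage() checks for

VkFramebufferCreateInfo fb_info = {};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.renderPass = render_pass;   // attachmentCount must equal the render pass's attachment count
fb_info.attachmentCount = 1;
fb_info.pAttachments = &color_view; // identity-swizzled view of the image above
fb_info.width = 1024;               // within maxFramebufferWidth
fb_info.height = 768;               // within maxFramebufferHeight
fb_info.layers = 1;
#endif
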
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
Tobin Ehlisd0945232016-06-22 10:02:02 -060010027static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
Mark Lobodzinski67533742016-06-16 13:23:02 -060010028 bool skip_call = false;
10029
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060010030 auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
10031 if (rp_state) {
10032 const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
Tobin Ehlisd0945232016-06-22 10:02:02 -060010033 if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
10034 skip_call |= log_msg(
10035 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010036 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
Tobin Ehlisd0945232016-06-22 10:02:02 -060010037 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010038 "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
10039 pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
10040 validation_error_map[VALIDATION_ERROR_00404]);
Tobin Ehlisfd005382016-06-22 13:32:23 -060010041 } else {
Tobin Ehlisdae051d2016-06-22 14:16:06 -060010042 // attachmentCounts match, so make sure corresponding attachment details line up
Tobin Ehlisfd005382016-06-22 13:32:23 -060010043 const VkImageView *image_views = pCreateInfo->pAttachments;
10044 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010045 auto view_state = getImageViewState(dev_data, image_views[i]);
Tobin Ehlisc8ca0312016-09-22 07:30:05 -060010046 auto &ivci = view_state->create_info;
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010047 if (ivci.format != rpci->pAttachments[i].format) {
Tobin Ehlisfd005382016-06-22 13:32:23 -060010048 skip_call |= log_msg(
10049 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010050 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
10051 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
10052 "the format of "
10053 "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010054 i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010055 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
Tobin Ehlisfd005382016-06-22 13:32:23 -060010056 }
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010057 const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
Tobin Ehlisfd005382016-06-22 13:32:23 -060010058 if (ici->samples != rpci->pAttachments[i].samples) {
Tobin Ehlisdae051d2016-06-22 14:16:06 -060010059 skip_call |= log_msg(
10060 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010061 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
10062 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
10063 "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
Tobin Ehlisdae051d2016-06-22 14:16:06 -060010064 i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010065 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
Tobin Ehlisfd005382016-06-22 13:32:23 -060010066 }
10067 // Verify that view only has a single mip level
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010068 if (ivci.subresourceRange.levelCount != 1) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010069 skip_call |=
10070 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
10071 VALIDATION_ERROR_00411, "DS",
10072 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
10073 "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
10074 i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
Tobin Ehlisfd005382016-06-22 13:32:23 -060010075 }
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010076 const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
Tobin Ehlisf058eca2016-06-22 16:38:29 -060010077 uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
10078 uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010079 if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
Tobin Ehlisf058eca2016-06-22 16:38:29 -060010080 (mip_height < pCreateInfo->height)) {
10081 skip_call |=
Tobin Ehlisa60950f2016-06-23 08:23:25 -060010082 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
Tobin Ehlisf058eca2016-06-22 16:38:29 -060010083 DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
10084 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
10085 "than the corresponding "
10086 "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
10087 "dimensions for "
10088 "attachment #%u, framebuffer:\n"
10089 "width: %u, %u\n"
10090 "height: %u, %u\n"
10091 "layerCount: %u, %u\n",
Tobin Ehlis8b26a382016-09-14 08:02:49 -060010092 i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
10093 pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
Tobin Ehlisfd005382016-06-22 13:32:23 -060010094 }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
                        "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                        "swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n"
                        "%s",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                        validation_error_map[VALIDATION_ERROR_00412]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
            // Verify color attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00413, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
                             "Requested width: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                             validation_error_map[VALIDATION_ERROR_00413]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00414, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
                             "Requested height: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                             validation_error_map[VALIDATION_ERROR_00414]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00415, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
                             "Requested layers: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                             validation_error_map[VALIDATION_ERROR_00415]);
    }
    return skip_call;
}
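// Worked example for the mip-dimension check above (hypothetical values, illustrative only):
// given an attachment image created at 1024x768 and a view with baseMipLevel = 2,
//     mip_width  = max(1u, 1024u >> 2);  // == 256
//     mip_height = max(1u,  768u >> 2);  // == 192
// so a VkFramebufferCreateInfo requesting width > 256, height > 192, or layers beyond the
// view's layerCount triggers the DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO message above.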

// Validate VkFramebufferCreateInfo state prior to calling down the chain to create the Framebuffer object.
// Returns true if an error is encountered and the callback requests that the call down the chain be skipped;
// false indicates that the call down the chain should proceed.
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
    bool skip_call = false;
    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip_call;
}

// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = getImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true, else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), static_cast<uint32_t>(dependent)) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}
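// Illustrative sketch of the search above (hypothetical subpass graph, not part of the layer):
// with dependencies 0->1 and 1->2, subpass_to_node[1].prev == {0} and subpass_to_node[2].prev == {1},
// so FindDependency(2, 0, subpass_to_node, processed_nodes) walks 2 -> 1 -> 0 and returns true,
// while FindDependency(0, 2, ...) returns false because prev edges only point to earlier passes.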

static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true as the next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
           ((offset1 > offset2) && (offset1 < (offset2 + size2)));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
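// Worked example (hypothetical values, illustrative only): mip ranges {base = 0, count = 2} and
// {base = 1, count = 3} overlap, since offset1 + size1 == 2 lies strictly inside (1, 4); layer
// ranges {base = 0, count = 2} and {base = 2, count = 2} do not, since 0 + 2 > 2 is false. Both
// axes must overlap before isRegionOverlapping() reports the two subresource ranges as aliased.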

static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip_call = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = getImageViewState(dev_data, viewi);
            auto view_state_j = getImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image &&
                isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageState(dev_data, view_ci_i.image);
            auto image_data_j = getImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then all subpasses that output to it must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved for all passes
    // after the one that wrote it.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. The initial check makes sure that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                 __LINE__, VALIDATION_ERROR_02351, "DS",
                                 "Cannot clear attachment %d with invalid first layout %s. %s", attachment,
                                 string_VkImageLayout(first_layout), validation_error_map[VALIDATION_ERROR_02351]);
        }
    }
    return skip_call;
}
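// Example of a combination the check above rejects (hypothetical values, illustrative only):
//     VkAttachmentDescription desc = {};
//     desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     // ... and the attachment's first use in the render pass specifies
//     //     layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Clearing is a write, which contradicts a read-only first layout, so VALIDATION_ERROR_02351 fires.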

static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pColorAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // This is ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints?
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                break;

            default:
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            switch (subpass.pDepthStencilAttachment->layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than doing
                // a bunch of transitions.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "GENERAL layout for depth attachment may not give optimal performance.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
            }

            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pInputAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal. TODO: reconsider this warning based on other constraints.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
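// Illustrative sketch (hypothetical dependency array, not part of the layer): two
// VkSubpassDependency entries, one with srcSubpass = 0 / dstSubpass = 1 and one with
// srcSubpass = 1 / dstSubpass = 1, yield subpass_to_node[0].next == {1},
// subpass_to_node[1].prev == {0}, and has_self_dependency[1] == true. An entry with
// srcSubpass > dstSubpass would instead be reported as an invalid backwards edge.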

VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    // Use the SPIRV-Tools validator to try and catch any issues with the module itself
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |=
            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}
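// Note on the validator call above (illustrative arithmetic): VkShaderModuleCreateInfo::codeSize
// is a byte count, while spv_const_binary_t expects a word count, hence the division by
// sizeof(uint32_t); e.g. a 400-byte SPIR-V module is handed to spvValidate() as 100 words.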

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip_call = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00325, "DS",
                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s",
                             type, attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
    }
    return skip_call;
}

static bool IsPowerOfTwo(unsigned x) {
    return x && !(x & (x - 1));
}
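// Worked example (illustrative only): VkSampleCountFlagBits values are single bits, so a subpass
// whose attachments all agree accumulates one bit, e.g. sample_count == VK_SAMPLE_COUNT_4_BIT
// (0x04), which is a power of two. Mixing 1x and 4x attachments gives
// VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT == 0x05, and IsPowerOfTwo(0x05) is false
// because 0x05 & 0x04 != 0, which is how inconsistent sample counts are caught below.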

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00347, "DS",
                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00347]);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00356, "DS",
                                     "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                     validation_error_map[VALIDATION_ERROR_00356]);
            } else {
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }

        auto subpass_performs_resolve =
            subpass.pResolveAttachments &&
            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00352, "DS",
                                         "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, "
                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                         validation_error_map[VALIDATION_ERROR_00352]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00351, "DS",
                                         "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u "
                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                 VALIDATION_ERROR_00337, "DS",
                                 "CreateRenderPass: Subpass %u attempts to render to "
                                 "attachments with inconsistent sample counts. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00337]);
        }
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    // ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
                                                  VALIDATION_ERROR_00368, VALIDATION_ERROR_00370);
        skip_call |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
                                                  VALIDATION_ERROR_00369, VALIDATION_ERROR_00371);
    }
    if (!skip_call) {
        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                                  const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
    auto const &framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS",
                             "You cannot start a render pass using a framebuffer "
                             "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto view_state = getImageViewState(dev_data, image_view);
        assert(view_state);
        const VkImage &image = view_state->create_info.image;
        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED && newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %u "
                                "where the render pass initial layout is %s and the previous "
                                "known layout of the attachment is %s. The layouts must match, or "
                                "the render pass initial layout for the attachment must be "
                                "VK_IMAGE_LAYOUT_UNDEFINED",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                          VkAttachmentReference ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
        SetLayout(dev_data, pCB, image_view, ref.layout);
    }
}

static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
    }
    if (subpass.pDepthStencilAttachment) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
                             validation_error_map[error_code]);
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                          const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        auto image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}
10905
Chris Forbes3dd83742016-10-03 19:35:49 +130010906static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
Michael Lentined4648812016-03-24 20:48:59 -050010907 bool skip_call = false;
Tobin Ehlis04c04272016-10-12 11:54:09 -060010908 const safe_VkFramebufferCreateInfo *pFramebufferInfo =
10909 &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
Michael Lentined4648812016-03-24 20:48:59 -050010910 if (pRenderPassBegin->renderArea.offset.x < 0 ||
10911 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10912 pRenderPassBegin->renderArea.offset.y < 0 ||
10913 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10914 skip_call |= static_cast<bool>(log_msg(
Chris Forbes3dd83742016-10-03 19:35:49 +130010915 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Michael Lentined4648812016-03-24 20:48:59 -050010916 DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10917                             "Cannot execute a render pass with renderArea not within the bounds of the "
10918 "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10919 "height %d.",
10920 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10921 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10922 }
10923 return skip_call;
10924}
10925
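// Minimal bounds sketch with made-up numbers: for a 1024x768 framebuffer,
// offset + extent must stay within the framebuffer dimensions.
//
//   VkRect2D render_area = {};
//   render_area.offset = {16, 16};
//   render_area.extent = {1008, 752};  // 16 + 1008 == 1024 and 16 + 752 == 768 -> in bounds
//   // extent = {1024, 768} with the same offset would overflow the framebuffer
//   // and trigger DRAWSTATE_INVALID_RENDER_AREA above.
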
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010926 // For a stencil-only format, check the stencil[Load|Store]Op; for a depth/color attachment, check the [load|store]Op;
10927 // combined depth/stencil formats consult both.
10928// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10929template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
Mark Lobodzinski323c6ba2016-06-21 10:28:30 -060010930 if (color_depth_op != op && stencil_op != op) {
10931 return false;
10932 }
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010933 bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10934 bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
Mark Lobodzinski323c6ba2016-06-21 10:28:30 -060010935
10936     return ((check_color_depth_load_op && (color_depth_op == op)) ||
10937             (check_stencil_load_op && (stencil_op == op)));
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010938}
10939
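// Usage sketch, assuming the vk_format_is_* helpers classify formats as their names
// suggest: for a combined depth/stencil format both ops are consulted, so a CLEAR
// on either aspect is detected.
//
//   VkAttachmentDescription att = {};
//   att.format = VK_FORMAT_D24_UNORM_S8_UINT;
//   att.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;          // depth aspect
//   att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;  // stencil aspect
//   bool clears = FormatSpecificLoadAndStoreOpSettings(att.format, att.loadOp, att.stencilLoadOp,
//                                                      VK_ATTACHMENT_LOAD_OP_CLEAR);
//   // clears == true: the stencil aspect is cleared even though depth is loaded.
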
Chia-I Wu629d7cd2016-05-06 11:32:54 +080010940VKAPI_ATTR void VKAPI_CALL
10941CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060010942 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010943 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010944 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010945 GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060010946 auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
Tobin Ehlis04c04272016-10-12 11:54:09 -060010947 auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010948 if (cb_node) {
Chris Forbes967c4682016-05-17 11:36:23 +120010949 if (renderPass) {
Tobin Ehlis7f0416c2016-07-15 16:01:13 -060010950 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010951 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
Mark Lobodzinski4dc768c2016-10-03 16:01:12 -060010952 for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
Chris Forbes05e03b72016-05-17 15:27:58 +120010953 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
Chris Forbesef730462016-09-27 12:03:31 +130010954 auto pAttachment = &renderPass->createInfo.pAttachments[i];
Chris Forbescc836ab2016-09-26 17:04:41 +130010955 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10956 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010957 VK_ATTACHMENT_LOAD_OP_CLEAR)) {
Mark Lobodzinskia8bbfde2016-07-20 10:02:23 -060010958 clear_op_size = static_cast<uint32_t>(i) + 1;
Chris Forbes967c4682016-05-17 11:36:23 +120010959 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010960 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
Chris Forbes967c4682016-05-17 11:36:23 +120010961 return false;
10962 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010963 cb_node->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130010964 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10965 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010966 VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
Chris Forbes967c4682016-05-17 11:36:23 +120010967 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010968 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
Chris Forbes967c4682016-05-17 11:36:23 +120010969 return false;
10970 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010971 cb_node->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130010972 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10973 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010974 VK_ATTACHMENT_LOAD_OP_LOAD)) {
Chris Forbes967c4682016-05-17 11:36:23 +120010975 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010976 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
Tobin Ehlis5d461152016-08-10 19:11:54 -060010977 "vkCmdBeginRenderPass()");
Chris Forbes967c4682016-05-17 11:36:23 +120010978 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010979 cb_node->validate_functions.push_back(function);
Chris Forbes967c4682016-05-17 11:36:23 +120010980 }
Chris Forbescc836ab2016-09-26 17:04:41 +130010981 if (renderPass->attachment_first_read[i]) {
Chris Forbes967c4682016-05-17 11:36:23 +120010982 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010983 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
Tobin Ehlis5d461152016-08-10 19:11:54 -060010984 "vkCmdBeginRenderPass()");
Chris Forbes967c4682016-05-17 11:36:23 +120010985 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010986 cb_node->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010987 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010988 }
Tobin Ehlis7f0416c2016-07-15 16:01:13 -060010989 if (clear_op_size > pRenderPassBegin->clearValueCount) {
Slawomir Cygan0808f392016-11-28 17:53:23 +010010990 skip_call |= log_msg(
10991 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10992 reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442,
10993                 "DS", "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10994                 "be at least %u entries in the pClearValues array to account for the highest indexed attachment in renderPass "
10995                 "0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR (the required count is %u). Note that the pClearValues array "
10996                 "is indexed by attachment number, so even if some pClearValues entries between 0 and %u correspond to "
10997                 "attachments that aren't cleared, they will be ignored. %s",
10998 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size,
10999 clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
11000 }
11001 if (clear_op_size < pRenderPassBegin->clearValueCount) {
11002 skip_call |= log_msg(
11003 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
11004 reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
11005 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only first %u "
11006 "entries in pClearValues array are used. The highest index attachment in renderPass 0x%" PRIx64
11007 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
11008 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size);
Tobin Ehlis74aa54a2016-05-31 13:06:24 -060011009 }
Tobin Ehlisfe871282016-06-28 10:28:02 -060011010 skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060011011 skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
Mike Weiblen6daea5b2016-12-19 20:41:58 -070011012 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
Tobin Ehlisfe871282016-06-28 10:28:02 -060011013 skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011014 skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011015 skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
11016 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060011017 cb_node->activeRenderPass = renderPass;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011018 // This is a shallow copy as that is all that is needed for now
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060011019 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
11020 cb_node->activeSubpass = 0;
11021 cb_node->activeSubpassContents = contents;
11022 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -060011023 // Connect this framebuffer and its children to this cmdBuffer
11024 AddFramebufferBinding(dev_data, cb_node, framebuffer);
Chris Forbesfb2aae32016-06-30 15:42:41 +120011025 // transition attachments to the correct layouts for the first subpass
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060011026 TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011027 }
11028 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011029 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011030 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011031 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011032 }
11033}
11034
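// Sizing sketch for the clearValueCount checks above, with hypothetical attachments:
// if attachment 0 uses LOAD_OP_LOAD and attachment 2 uses LOAD_OP_CLEAR, clear_op_size
// becomes 3, so clearValueCount must be at least 3 even though pClearValues[0] and
// pClearValues[1] are never read.
//
//   VkClearValue clear_values[3] = {};  // entries 0 and 1 are placeholders
//   clear_values[2].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//   VkRenderPassBeginInfo rp_begin = {};
//   rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//   rp_begin.clearValueCount = 3;
//   rp_begin.pClearValues = clear_values;
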
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011035VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011036 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011037 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011038 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011039 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011040 if (pCB) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011041 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011042 skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
11043 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
Mike Weiblen6daea5b2016-12-19 20:41:58 -070011044 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);
Chris Forbes0948afa2016-09-07 11:44:08 +120011045
Chris Forbesef730462016-09-27 12:03:31 +130011046 auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
Chris Forbes0948afa2016-09-07 11:44:08 +120011047 if (pCB->activeSubpass == subpassCount - 1) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011048 skip_call |= log_msg(
11049 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11050 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00453, "DS",
11051 "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s", validation_error_map[VALIDATION_ERROR_00453]);
Chris Forbes0948afa2016-09-07 11:44:08 +120011052 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011053 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011054 lock.unlock();
Chris Forbes2784bcb2016-09-07 11:43:52 +120011055
11056 if (skip_call)
11057 return;
11058
Chris Forbesaaa9c282016-10-03 20:01:14 +130011059 dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
Chris Forbes2784bcb2016-09-07 11:43:52 +120011060
11061 if (pCB) {
11062 lock.lock();
11063 pCB->activeSubpass++;
11064 pCB->activeSubpassContents = contents;
11065 TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
11066 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011067}
11068
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011069VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011070 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011071 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011072 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbesc785a102016-05-17 14:59:22 +120011073 auto pCB = getCBNode(dev_data, commandBuffer);
11074 if (pCB) {
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011075 RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
Tobin Ehlis04c04272016-10-12 11:54:09 -060011076 auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011077 if (rp_state) {
11078 if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011079 skip_call |= log_msg(
11080 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11081 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
11082 "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
Chris Forbes85bb4002016-09-07 14:08:31 +120011083 }
11084
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011085 for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
Chris Forbesb065df02016-05-17 15:45:31 +120011086 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011087 auto pAttachment = &rp_state->createInfo.pAttachments[i];
Chris Forbescc836ab2016-09-26 17:04:41 +130011088 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11089 pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
Chris Forbesc785a102016-05-17 14:59:22 +120011090 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011091 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
Chris Forbesc785a102016-05-17 14:59:22 +120011092 return false;
11093 };
11094 pCB->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130011095 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11096 pAttachment->stencilStoreOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060011097 VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
Chris Forbesc785a102016-05-17 14:59:22 +120011098 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011099 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
Chris Forbesc785a102016-05-17 14:59:22 +120011100 return false;
11101 };
11102 pCB->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011103 }
11104 }
11105 }
Mike Weiblen6daea5b2016-12-19 20:41:58 -070011106         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011107 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011108 skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
11109 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
Chris Forbes2886a9e2016-09-07 13:52:28 +120011110 }
11111 lock.unlock();
11112
11113 if (skip_call)
11114 return;
11115
Chris Forbesaaa9c282016-10-03 20:01:14 +130011116 dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
Chris Forbes2886a9e2016-09-07 13:52:28 +120011117
11118 if (pCB) {
11119 lock.lock();
Chris Forbes05e03b72016-05-17 15:27:58 +120011120 TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
Chris Forbesc785a102016-05-17 14:59:22 +120011121 pCB->activeRenderPass = nullptr;
11122 pCB->activeSubpass = 0;
11123 pCB->activeFramebuffer = VK_NULL_HANDLE;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011124 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011125}
11126
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011127static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
11128 uint32_t secondaryAttach, const char *msg) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011129 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011130 VALIDATION_ERROR_02059, "DS",
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011131 "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
11132                    "that is not compatible with the Primary Cmd Buffer's current render pass. "
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011133 "Attachment %u is not compatible with %u: %s. %s",
11134 reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
11135 validation_error_map[VALIDATION_ERROR_02059]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011136}
11137
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011138static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11139 VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
11140 VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
Dustin Graves8f1eab92016-04-05 09:41:17 -060011141 uint32_t secondaryAttach, bool is_multi) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011142 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011143 if (primaryPassCI->attachmentCount <= primaryAttach) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011144 primaryAttach = VK_ATTACHMENT_UNUSED;
11145 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011146 if (secondaryPassCI->attachmentCount <= secondaryAttach) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011147 secondaryAttach = VK_ATTACHMENT_UNUSED;
11148 }
11149 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
11150 return skip_call;
11151 }
11152 if (primaryAttach == VK_ATTACHMENT_UNUSED) {
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011153 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11154 "The first is unused while the second is not.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011155 return skip_call;
11156 }
11157 if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011158 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11159 "The second is unused while the first is not.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011160 return skip_call;
11161 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011162 if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
11163 skip_call |=
11164 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011165 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011166 if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
11167 skip_call |=
11168 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011169 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011170 if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
11171 skip_call |=
11172 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011173 }
11174 return skip_call;
11175}
11176
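// Compatibility sketch: two attachment descriptions, one from a hypothetical primary
// render pass and one from a secondary, are judged compatible when format and sample
// count match; flags are compared only for multi-subpass passes (is_multi).
//
//   VkAttachmentDescription primary_att = {};
//   primary_att.format = VK_FORMAT_B8G8R8A8_UNORM;
//   primary_att.samples = VK_SAMPLE_COUNT_1_BIT;
//   VkAttachmentDescription secondary_att = primary_att;
//   secondary_att.samples = VK_SAMPLE_COUNT_4_BIT;  // mismatch -> "They have different samples."
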
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011177static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11178 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11179 VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011180 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011181 const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
11182 const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011183 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
11184 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
11185 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
11186 if (i < primary_desc.inputAttachmentCount) {
11187 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
11188 }
11189 if (i < secondary_desc.inputAttachmentCount) {
11190 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
11191 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011192 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
11193 secondaryPassCI, secondary_input_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011194 }
11195 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
11196 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
11197 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
11198 if (i < primary_desc.colorAttachmentCount) {
11199 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
11200 }
11201 if (i < secondary_desc.colorAttachmentCount) {
11202 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
11203 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011204 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
11205 secondaryPassCI, secondary_color_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011206 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
11207 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
11208 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
11209 }
11210 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
11211 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
11212 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011213 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
11214 secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011215 }
11216 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
11217 if (primary_desc.pDepthStencilAttachment) {
11218 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
11219 }
11220 if (secondary_desc.pDepthStencilAttachment) {
11221 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
11222 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011223 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
11224 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011225 return skip_call;
11226}
11227
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011228// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
11229// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
11230// will then feed into this function
11231static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11232 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11233 VkRenderPassCreateInfo const *secondaryPassCI) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011234 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011235
11236 if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011237 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11238 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011239 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
11240 " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
11241 " that has a subpassCount of %u.",
11242 reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
11243 reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
11244 } else {
11245 for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
11246 skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
11247 primaryPassCI->subpassCount > 1);
11248 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011249 }
11250 return skip_call;
11251}
11252
Dustin Graves8f1eab92016-04-05 09:41:17 -060011253static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
11254 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011255 bool skip_call = false;
11256 if (!pSubCB->beginInfo.pInheritanceInfo) {
11257 return skip_call;
11258 }
Chris Forbes89ca84a2016-05-13 16:23:58 +120011259 VkFramebuffer primary_fb = pCB->activeFramebuffer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011260 VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
11261 if (secondary_fb != VK_NULL_HANDLE) {
11262 if (primary_fb != secondary_fb) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011263 skip_call |= log_msg(
11264 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11265 VALIDATION_ERROR_02060, "DS",
11266 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
11267 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
11268 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
11269 reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011270 }
Tobin Ehlis04c04272016-10-12 11:54:09 -060011271 auto fb = getFramebufferState(dev_data, secondary_fb);
Chris Forbesb065df02016-05-17 15:45:31 +120011272 if (!fb) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011273 skip_call |=
11274 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mark Muelleraab36502016-05-03 13:17:29 -060011275 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11276 "which has invalid framebuffer 0x%" PRIx64 ".",
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011277 (void *)secondaryBuffer, (uint64_t)(secondary_fb));
11278 return skip_call;
11279 }
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011280 auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011281 if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
11282 skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
Chris Forbesef730462016-09-27 12:03:31 +130011283 cb_renderpass->createInfo.ptr());
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011284 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011285 }
11286 return skip_call;
11287}
11288
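// Inheritance sketch: a secondary command buffer may name a framebuffer in its
// inheritance info; if it does, it must be the framebuffer bound by the primary's
// vkCmdBeginRenderPass. The handles below are hypothetical.
//
//   VkCommandBufferInheritanceInfo inherit = {};
//   inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//   inherit.renderPass = render_pass;   // must be compatible with the active render pass
//   inherit.framebuffer = framebuffer;  // VK_NULL_HANDLE is also legal and skips this check
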
Dustin Graves8f1eab92016-04-05 09:41:17 -060011289static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011290 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011291 unordered_set<int> activeTypes;
11292 for (auto queryObject : pCB->activeQueries) {
11293 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11294 if (queryPoolData != dev_data->queryPoolMap.end()) {
11295 if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
11296 pSubCB->beginInfo.pInheritanceInfo) {
11297 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
11298 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011299 skip_call |=
11300 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11301 VALIDATION_ERROR_02065, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11302 "which has invalid active query pool 0x%" PRIx64
11303                             ". Pipeline statistics are being queried so the command "
11304 "buffer must have all bits set on the queryPool. %s",
11305 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11306 validation_error_map[VALIDATION_ERROR_02065]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011307 }
11308 }
11309 activeTypes.insert(queryPoolData->second.createInfo.queryType);
11310 }
11311 }
11312 for (auto queryObject : pSubCB->startedQueries) {
11313 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11314 if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011315 skip_call |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011316 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11317 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011318 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11319                     "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
11320 "secondary Cmd Buffer 0x%p.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011321 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11322 queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011323 }
11324 }
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -060011325
11326 auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
11327 auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
11328 if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011329 skip_call |=
11330 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11331 reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
11332 "vkCmdExecuteCommands(): Primary command buffer 0x%p"
11333 " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
11334 pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -060011335 }
11336
Tobin Ehlisfe871282016-06-28 10:28:02 -060011337 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011338}
11339
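// Pipeline-statistics sketch for the subset check above: the secondary's inherited
// pipelineStatistics must be a subset of the bits enabled on the active query pool;
// the error fires when the inheritance asks for a bit the pool lacks. Values below
// are hypothetical.
//
//   VkQueryPipelineStatisticFlags pool_bits =
//       VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
//       VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//   VkQueryPipelineStatisticFlags inherited = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT;
//   bool ok = ((inherited & pool_bits) == inherited);  // true here, so no error is logged
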
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011340VKAPI_ATTR void VKAPI_CALL
11341CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011342 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011343 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011344 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011345 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
11346 if (pCB) {
11347 GLOBAL_CB_NODE *pSubCB = NULL;
11348 for (uint32_t i = 0; i < commandBuffersCount; i++) {
11349 pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
Tobin Ehlis44ba5fc2017-01-03 14:07:17 -070011350 assert(pSubCB);
11351 if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011352 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
Mike Weiblen25c90822016-12-06 17:10:22 -070011353 __LINE__, VALIDATION_ERROR_00153, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -060011354 "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
Mike Weiblen25c90822016-12-06 17:10:22 -070011355 "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011356 pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011357 } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011358 auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011359 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011360 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011361 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Mike Weiblen25c90822016-12-06 17:10:22 -070011362 (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011363 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
Mike Weiblen25c90822016-12-06 17:10:22 -070011364 ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011365 pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
Mike Weiblen25c90822016-12-06 17:10:22 -070011366 validation_error_map[VALIDATION_ERROR_02057]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011367 } else {
11368 // Make sure render pass is compatible with parent command buffer pass if has continue
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011369 if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
Chris Forbesef730462016-09-27 12:03:31 +130011370 skip_call |=
11371 validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011372 pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011373 }
Tobin Ehlisf77f5cc2016-07-19 10:45:24 -060011374 // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
Tobin Ehlisfe871282016-06-28 10:28:02 -060011375 skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011376 }
11377 string errorString = "";
Tobin Ehlisf77f5cc2016-07-19 10:45:24 -060011378 // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011379 if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
Chris Forbesef730462016-09-27 12:03:31 +130011380 !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011381 secondary_rp_state->createInfo.ptr(), errorString)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011382 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011383 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11384 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011385 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
11386 ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011387 pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
Chris Forbesa4937a72016-05-06 16:31:14 +120011388 (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011389 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011390 }
11391 // TODO(mlentine): Move more logic into this method
Tobin Ehlisfe871282016-06-28 10:28:02 -060011392 skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
Tobin Ehlisf7cf9152016-09-27 13:10:33 -060011393 skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011394 // Secondary cmdBuffers are considered pending execution starting w/
11395 // being recorded
11396 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
11397 if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
Mike Weiblen25c90822016-12-06 17:10:22 -070011398 skip_call |=
11399 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11400 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011401 VALIDATION_ERROR_00154, "DS", "Attempt to simultaneously execute command buffer 0x%p"
Mike Weiblen25c90822016-12-06 17:10:22 -070011402 " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011403 pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011404 }
11405 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
11406 // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
Tobin Ehlisfe871282016-06-28 10:28:02 -060011407 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011408 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11409 (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011410 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
11411 "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
11412 "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
Tobin Ehlisfe871282016-06-28 10:28:02 -060011413 "set, even though it does.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011414 pCommandBuffers[i], pCB->commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011415 pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
11416 }
11417 }
Chris Forbes94c5f532016-10-03 17:42:38 +130011418 if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011419 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11420 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCommandBuffers[i]),
11421 __LINE__, VALIDATION_ERROR_02062, "DS", "vkCmdExecuteCommands(): Secondary Command Buffer "
11422                                  "(0x%p) cannot be executed while a query is in "
11423                                  "flight because inherited queries are not "
11424                                  "supported on this device. %s",
11425 pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011426 }
Tobin Ehlisd6280b12016-11-03 10:46:44 -060011427 // Propagate layout transitions to the primary cmd buffer
11428 for (auto ilm_entry : pSubCB->imageLayoutMap) {
11429 SetLayout(pCB, ilm_entry.first, ilm_entry.second);
11430 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011431 pSubCB->primaryCommandBuffer = pCB->commandBuffer;
11432 pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
11433 dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
Michael Lentinef01fb382016-07-21 17:24:56 -050011434 for (auto &function : pSubCB->queryUpdates) {
11435 pCB->queryUpdates.push_back(function);
11436 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011437 }
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011438         skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011439         skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
11440 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011441 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011442 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011443 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +130011444 dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011445}
11446
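// Recording sketch for a secondary command buffer that passes the checks above when
// executed inside a render pass (all names are hypothetical):
//
//   VkCommandBufferBeginInfo begin = {};
//   begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//   begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;  // required inside a render pass
//   begin.pInheritanceInfo = &inherit;  // renderPass/framebuffer compatible with the primary's
//   vkBeginCommandBuffer(secondary_cb, &begin);
//   // ... record draws ...
//   vkEndCommandBuffer(secondary_cb);
//   vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);
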
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011447// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011448static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011449 VkDeviceSize end_offset) {
Dustin Graves8f1eab92016-04-05 09:41:17 -060011450 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011451 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011452 // Iterate over all bound image ranges and verify that for any that overlap the
11453 // map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
11454 // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
11455 for (auto image_handle : mem_info->bound_images) {
11456 auto img_it = mem_info->bound_ranges.find(image_handle);
11457 if (img_it != mem_info->bound_ranges.end()) {
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011458 if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011459 std::vector<VkImageLayout> layouts;
Tobin Ehlis12a4b5e2016-08-08 12:33:11 -060011460 if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011461 for (auto layout : layouts) {
11462 if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
11463 skip_call |=
11464 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11465 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
11466 "GENERAL or PREINITIALIZED are supported.",
11467 string_VkImageLayout(layout));
11468 }
11469 }
11470 }
11471 }
11472 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011473 }
11474 return skip_call;
11475}
11476
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011477VKAPI_ATTR VkResult VKAPI_CALL
11478MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011479 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11480
Dustin Graves8f1eab92016-04-05 09:41:17 -060011481 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011482 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011483 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011484 DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
11485 if (mem_info) {
Tobin Ehlisc3e9c7b2016-08-10 17:00:51 -060011486         // TODO : This could be more fine-grained to track just the region that is valid
11487 mem_info->global_valid = true;
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011488 auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
11489 skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011490 // TODO : Do we need to create new "bound_range" for the mapped range?
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011491 SetMemRangesValid(dev_data, mem_info, offset, end_offset);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011492 if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
Tobin Ehlise54be7b2016-04-11 14:49:55 -060011493 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
Mike Weiblend3fb3132016-12-06 10:28:00 -070011494 skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11495 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
11496 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
11497 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011498 }
11499 }
Tobin Ehlisb495d5f2016-08-04 09:33:02 -060011500 skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011501 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011502
Dustin Graves8f1eab92016-04-05 09:41:17 -060011503 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011504 result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
Tobin Ehlisd094c272016-05-12 08:31:32 -060011505 if (VK_SUCCESS == result) {
Tobin Ehlisd094c272016-05-12 08:31:32 -060011506 lock.lock();
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011507 // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
Tobin Ehlisd094c272016-05-12 08:31:32 -060011508 storeMemRanges(dev_data, mem, offset, size);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011509 initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
Tobin Ehlisd094c272016-05-12 08:31:32 -060011510 lock.unlock();
Tobin Ehlisd094c272016-05-12 08:31:32 -060011511 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011512 }
11513 return result;
11514}
11515
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011516VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011517 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -060011518 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011519
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011520 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbes3dd83742016-10-03 19:35:49 +130011521 skip_call |= deleteMemRanges(dev_data, mem);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011522 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011523 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011524 dev_data->dispatch_table.UnmapMemory(device, mem);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011525 }
11526}
11527
Chris Forbes3dd83742016-10-03 19:35:49 +130011528static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
Dustin Graves8f1eab92016-04-05 09:41:17 -060011529 const VkMappedMemoryRange *pMemRanges) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011530 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011531 for (uint32_t i = 0; i < memRangeCount; ++i) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011532 auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
Tobin Ehlis997b2582016-06-02 08:43:37 -060011533 if (mem_info) {
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011534 if (pMemRanges[i].size == VK_WHOLE_SIZE) {
11535 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011536 skip |=
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011537 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11538 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00643, "MEM",
11539 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
11540 "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11541 funcName, static_cast<size_t>(pMemRanges[i].offset),
11542 static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
11543 }
11544 } else {
11545 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
11546 ? mem_info->alloc_info.allocationSize
11547 : (mem_info->mem_range.offset + mem_info->mem_range.size);
11548 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
11549 (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011550 skip |=
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011551 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11552 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
11553 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
11554                             ") exceeds the Memory Object's upper-bound "
11555 "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11556 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
11557 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
11558 validation_error_map[VALIDATION_ERROR_00642]);
11559 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011560 }
11561 }
11562 }
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011563 return skip;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011564}
11565
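// Range sketch with made-up numbers: given memory mapped at offset 64 with size 256
// (upper bound 320), a flush/invalidate of [128, 192) passes, while offset 32 (below
// the map) or offset 128 with size 512 (end past 320) would be rejected above.
//
//   VkMappedMemoryRange range = {};
//   range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//   range.memory = mem;  // hypothetical handle, mapped at offset 64, size 256
//   range.offset = 128;
//   range.size = 64;     // 128 + 64 <= 320 -> within the mapped region
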
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011566static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
11567 const VkMappedMemoryRange *mem_ranges) {
11568 bool skip = false;
11569 for (uint32_t i = 0; i < mem_range_count; ++i) {
11570 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
Tobin Ehlis997b2582016-06-02 08:43:37 -060011571 if (mem_info) {
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011572 if (mem_info->shadow_copy) {
11573 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11574 ? mem_info->mem_range.size
Tobin Ehlis968f5dd2016-10-05 07:50:25 -060011575 : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011576 char *data = static_cast<char *>(mem_info->shadow_copy);
11577 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11578 if (data[j] != NoncoherentMemoryFillValue) {
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011579 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11580 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11581 MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
11582 (uint64_t)mem_ranges[i].memory);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011583 }
11584 }
11585 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011586 if (data[j] != NoncoherentMemoryFillValue) {
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011587 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11588 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11589 MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
11590 (uint64_t)mem_ranges[i].memory);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011591 }
11592 }
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011593 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011594 }
11595 }
11596 }
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011597 return skip;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011598}
11599
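// Shadow-copy layout sketch: for non-coherent mappings the layer hands the app a
// padded staging buffer and checks the guard bytes on flush. Conceptually:
//
//   [ shadow_pad_size bytes | user-visible mapping (size bytes) | shadow_pad_size bytes ]
//
//   Both pads must still hold NoncoherentMemoryFillValue; a stray write into either
//   pad is reported as memory underflow/overflow before the middle region is
//   memcpy'd to mem_info->p_driver_data.
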
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011600static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
11601 for (uint32_t i = 0; i < mem_range_count; ++i) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011602 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011603 if (mem_info && mem_info->shadow_copy) {
11604 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11605 ? mem_info->mem_range.size
11606 : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11607 char *data = static_cast<char *>(mem_info->shadow_copy);
11608 memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
Mark Lobodzinskibc4d6202016-08-16 09:06:15 -060011609 }
11610 }
11611}
11612
static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                  const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
        if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00644, "MEM",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
        }
        if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_00645, "MEM",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
                            func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
        }
    }
    return skip;
}

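// Validation preceding vkFlushMappedMemoryRanges: check the shadow-copy pad
// bytes for stray writes and verify every range refers to memory that is
// currently mapped.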
static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                   const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
    skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                       const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                        const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    std::lock_guard<std::mutex> lock(global_lock);
    skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
    return skip;
}

static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    std::lock_guard<std::mutex> lock(global_lock);
    // Update our shadow copy with modified driver data
    CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}

VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
        if (result == VK_SUCCESS) {
            PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
        }
    }
    return result;
}

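// vkBindImageMemory: verify the image is known, bind the memory handle, warn if
// vkGetImageMemoryRequirements() was never called, and validate the offset and
// memory type against the image's requirements before calling down the chain.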
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        if (!image_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied in that the memory being bound must conform to the VkMemoryRequirements
            // from vkGetImageMemoryRequirements()
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
                                 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
                                 " but vkGetImageMemoryRequirements() has not been called on that image.",
                                 image_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
            lock.lock();
        }

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
                                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
                                             VALIDATION_ERROR_00806);
        }

        lock.unlock();
        if (!skip_call) {
            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            image_state->binding.mem = mem;
            image_state->binding.offset = memoryOffset;
            image_state->binding.size = image_state->requirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

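// vkSetEvent: record the host-side signal, warn if the event is still in use by
// a submitted command buffer, and fold VK_PIPELINE_STAGE_HOST_BIT into the
// stage mask of every queue that has seen this event.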
VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_state = getEventNode(dev_data, event);
    if (event_state) {
        event_state->needsSignaled = false;
        event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_state->write_in_use) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->dispatch_table.SetEvent(device, event);
    return result;
}

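// vkQueueBindSparse: validate the fence, record sparse memory bindings, and do
// the same semaphore wait/signal accounting as vkQueueSubmit so that
// completion proofs (fences, semaphore waits) can be chased across queues.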
VKAPI_ATTR VkResult VKAPI_CALL
QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pFence = getFenceNode(dev_data, fence);
    auto pQueue = getQueueState(dev_data, queue);

    // First verify that fence is not in use
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (pFence) {
        SubmitFence(pQueue, pFence, bindInfoCount);
    }

    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                        (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                        "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                        (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
                if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
                                        (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        "vkQueueBindSparse"))
                    skip_call = true;
            }
        }

        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                        "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                        queue, reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
                                ", but that semaphore is already signaled.",
                                queue, reinterpret_cast<const uint64_t &>(semaphore));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !bindInfoCount) {
        // No work to do, just dropping a fence in the queue by itself.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(), fence);
    }

    lock.unlock();

    if (!skip_call)
        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaler.first = VK_NULL_HANDLE;
        sNode->signaler.second = 0;
        sNode->signaled = false;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].write_in_use = 0;
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

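// PreCallValidateCreateSwapchainKHR: check pCreateInfo against what the app has
// (or has not) queried about the surface. The sequence this validation expects
// from the app looks roughly like the following sketch (hypothetical 'gpu' and
// 'surface' handles, not part of this layer):
//
//     VkSurfaceCapabilitiesKHR caps;
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//     uint32_t format_count, mode_count;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     // ...then fill VkSwapchainCreateInfoKHR within the returned bounds.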
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.
    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
            return true;
    }
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
                    "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
            return true;
    }
    auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s: surface capabilities not retrieved for this physical device", func_name))
            return true;
    } else { // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned "
                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_02331]))
                return true;
        }

        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned "
                        "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_02332]))
                return true;
        }

        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
            ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d). %s",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
                        validation_error_map[VALIDATION_ERROR_02334]))
                return true;
        }
        if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
            ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
             (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
                        "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height,
                        validation_error_map[VALIDATION_ERROR_02334]))
                return true;
        }
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
                return true;
        }

        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
                        errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
                return true;
        }
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
                        "%s called with a non-supported imageArrayLayers (i.e. %d). Minimum value is 1, maximum value is %d. %s",
                        func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
                        validation_error_map[VALIDATION_ERROR_02335]))
                return true;
        }
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x. %s",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
                        validation_error_map[VALIDATION_ERROR_02336]))
                return true;
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
            return true;
    } else {
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundMatch = true;
                    break;
                }
            } else {
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
                }
            }
        }
        if (!foundMatch) {
            if (!foundFormat) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s",
                            func_name, pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
                    return true;
            }
            if (!foundColorSpace) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s",
                            func_name, pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
                    return true;
            }
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
                return true;
        }
    } else {
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
        if (!foundMatch) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
                return true;
        }
    }

    return false;
}

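// On success, register the new swapchain as the surface's current swapchain;
// on failure, clear it. Either way oldSwapchain is treated as replaced, as the
// spec requires.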
static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
                                             SWAPCHAIN_NODE *old_swapchain_state) {
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
        surface_state->swapchain = swapchain_state.get();
        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
    } else {
        surface_state->swapchain = nullptr;
    }
    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
    if (old_swapchain_state) {
        old_swapchain_state->replaced = true;
    }
    surface_state->old_swapchain = old_swapchain_state;
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);

    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);

    return result;
}

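// vkDestroySwapchainKHR: purge per-image layout and binding state for every
// swapchain image, then detach the swapchain from its surface.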
VKAPI_ATTR void VKAPI_CALL
DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skip_call |=
                    ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageMap.erase(swapchain_image);
            }
        }

        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data)
                surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data)
                surface_state->old_swapchain = nullptr;
        }

        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

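// vkGetSwapchainImagesKHR: mirror the returned images into layer state so that
// layout tracking and memory validation can treat them like regular images.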
VKAPI_ATTR VkResult VKAPI_CALL
GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
        if (swapchain_node && !swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            // Add imageMap entries for each swapchain image
            VkImageCreateInfo image_ci = {};
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            image_ci.usage = swapchain_node->createInfo.imageUsage;
            image_ci.format = swapchain_node->createInfo.imageFormat;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
            auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
            image_state->valid = false;
            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
    }
    return result;
}

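// vkQueuePresentKHR: every presented image must have been acquired, be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and target a surface this queue family can
// present to; wait semaphores must have a pending signal.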
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;

    std::lock_guard<std::mutex> lock(global_lock);
    auto queue_state = getQueueState(dev_data, queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                                 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                     DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                     "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = getImageState(dev_data, image);
                skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                if (!image_state->acquired) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                         DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                                         "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
                                         pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s. %s",
                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able
            // to present to any native window on Android; require the
            // application to have established support on any other platform.
            if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
                auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
                                DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS", "vkQueuePresentKHR: Presenting image without calling "
                                                                             "vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961,
                                "DS", "vkQueuePresentKHR: Presenting image on queue that cannot "
                                      "present to this surface. %s",
                                validation_error_map[VALIDATION_ERROR_01961]);
                }
            }
        }
    }

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed-- but if the app does that, it's confused
            // itself just as much.
            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;

            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
                continue; // this present didn't actually happen.

            // Mark the image as having been released to the WSI
            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            auto image_state = getImageState(dev_data, image);
            image_state->acquired = false;
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

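// VK_KHR_display_swapchain path: each element of pCreateInfos is validated by
// reusing PreCallValidateCreateSwapchainKHR above, with an indexed function
// name so messages identify the offending entry.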
static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                     std::vector<SURFACE_STATE *> &surface_state,
                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
            old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
                                                  old_swapchain_state[i])) {
                return true;
            }
        }
    }
    return false;
}

static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                    std::vector<SURFACE_STATE *> &surface_state,
                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (VK_SUCCESS == result) {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
            surface_state[i]->swapchain = swapchain_state.get();
            dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
        }
    } else {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state[i]->swapchain = nullptr;
        }
    }
    // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
    for (uint32_t i = 0; i < swapchainCount; i++) {
        if (old_swapchain_state[i]) {
            old_swapchain_state[i]->replaced = true;
        }
        surface_state[i]->old_swapchain = old_swapchain_state[i];
    }
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::vector<SURFACE_STATE *> surface_state;
    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;

    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                                 old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);

    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                            old_swapchain_state);

    return result;
}

Chia-I Wu629d7cd2016-05-06 11:32:54 +080012442VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12443 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012444 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -060012445 bool skip_call = false;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012446
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012447 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbes76fa6c62016-09-22 16:40:27 +120012448
12449 if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
12450 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Chris Forbesf54f4c72016-09-26 15:18:57 +130012451 reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
Chris Forbes76fa6c62016-09-22 16:40:27 +120012452 "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
12453 "to determine the completion of this operation.");
12454 }
12455
Chris Forbes8784e952016-06-16 12:20:32 +120012456 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
12457 if (pSemaphore && pSemaphore->signaled) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060012458 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012459 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
12460 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
12461 validation_error_map[VALIDATION_ERROR_01952]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012462 }
Chris Forbes8784e952016-06-16 12:20:32 +120012463
12464 auto pFence = getFenceNode(dev_data, fence);
12465 if (pFence) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060012466 skip_call |= ValidateFenceForSubmit(dev_data, pFence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012467 }
Chris Forbes73de0852016-10-12 13:27:13 +130012468
12469 auto swapchain_data = getSwapchainNode(dev_data, swapchain);
Chris Forbes9a936d72016-12-09 11:00:21 +130012470
    if (swapchain_data && swapchain_data->replaced) {
12472 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12473 reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
12474 "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
12475 "present any images it has acquired, but cannot acquire any more.");
12476 }
12477
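    // Count images currently acquired from this swapchain and warn if the application already holds more than
    // (imageCount - minImageCount), since acquiring yet another image is then not guaranteed to complete.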
Chris Forbes73de0852016-10-12 13:27:13 +130012478 auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
Mark Lobodzinski9e023442016-11-23 11:28:30 -070012480 uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
12481 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
Chris Forbes73de0852016-10-12 13:27:13 +130012482 if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
Mark Lobodzinski9e023442016-11-23 11:28:30 -070012483 skip_call |=
12484 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12485 reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
12486 "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
12487 acquired_images);
Chris Forbes73de0852016-10-12 13:27:13 +130012488 }
12489 }
Thomas Louis628139e2017-01-11 00:17:08 +010012490
    if (swapchain_data && swapchain_data->images.empty()) {
12492 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12493 reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
12494 "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
12495 "vkGetSwapchainImagesKHR after swapchain creation.");
12496 }
12497
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012498 lock.unlock();
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012499
Tobin Ehlisfe871282016-06-28 10:28:02 -060012500 if (skip_call)
Chris Forbes8784e952016-06-16 12:20:32 +120012501 return VK_ERROR_VALIDATION_FAILED_EXT;
12502
Chris Forbesaaa9c282016-10-03 20:01:14 +130012503 VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
Chris Forbes8784e952016-06-16 12:20:32 +120012504
12505 lock.lock();
12506 if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
12507 if (pFence) {
12508 pFence->state = FENCE_INFLIGHT;
Chris Forbes8320a8d2016-08-01 15:15:30 +120012509 pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
Chris Forbes8784e952016-06-16 12:20:32 +120012510 }
12511
12512 // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
12513 if (pSemaphore) {
12514 pSemaphore->signaled = true;
Chris Forbes8320a8d2016-08-01 15:15:30 +120012515 pSemaphore->signaler.first = VK_NULL_HANDLE;
Chris Forbes8784e952016-06-16 12:20:32 +120012516 }
Chris Forbes048399d2016-09-22 17:11:06 +120012517
        // Mark the image as acquired.
        if (swapchain_data) {
            auto image = swapchain_data->images[*pImageIndex];
            auto image_state = getImageState(dev_data, image);
            image_state->acquired = true;
        }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012522 }
Chris Forbes8784e952016-06-16 12:20:32 +120012523 lock.unlock();
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012524
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012525 return result;
12526}
12527
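// vkEnumeratePhysicalDevices uses the usual two-call idiom: first call with NULL pPhysicalDevices to obtain the count, then
// call again with an array of that size. The layer tracks this per instance (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS) and
// warns about call sequences that skip the count query or pass a mismatched count.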
Mark Lobodzinski51695432016-06-27 16:47:24 -060012528VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
12529 VkPhysicalDevice *pPhysicalDevices) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060012530 bool skip_call = false;
Chris Forbesfb06dd62016-10-03 19:14:25 +130012531 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
Tobin Ehlis8b0fbe22017-01-04 07:53:33 -070012532 assert(instance_data);
Chris Forbesa13fe522016-10-13 15:34:59 +130012533
Tobin Ehlis8b0fbe22017-01-04 07:53:33 -070012534 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
12535 if (NULL == pPhysicalDevices) {
12536 instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
Mark Lobodzinski51695432016-06-27 16:47:24 -060012537 } else {
Tobin Ehlis8b0fbe22017-01-04 07:53:33 -070012538 if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
12539 // Flag warning here. You can call this without having queried the count, but it may not be
12540 // robust on platforms with multiple physical devices.
12541 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12542 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12543 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
12544 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
        }
        // TODO: Could also flag a warning if re-calling this function in QUERY_DETAILS state
        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
12547 // Having actual count match count from app is not a requirement, so this can be a warning
12548 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12549 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12550 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
12551 "supported by this instance is %u.",
12552 *pPhysicalDeviceCount, instance_data->physical_devices_count);
12553 }
12554 instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
Mark Lobodzinski51695432016-06-27 16:47:24 -060012555 }
Tobin Ehlis8b0fbe22017-01-04 07:53:33 -070012556 if (skip_call) {
12557 return VK_ERROR_VALIDATION_FAILED_EXT;
12558 }
12559 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
12560 if (NULL == pPhysicalDevices) {
12561 instance_data->physical_devices_count = *pPhysicalDeviceCount;
12562 } else if (result == VK_SUCCESS) { // Save physical devices
12563 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
12564 auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
12565 phys_device_state.phys_device = pPhysicalDevices[i];
12566 // Init actual features for each physical device
12567 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
12568 }
12569 }
12570 return result;
Mark Lobodzinski51695432016-06-27 16:47:24 -060012571}
12572
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012573VKAPI_ATTR void VKAPI_CALL
12574GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12575 VkQueueFamilyProperties *pQueueFamilyProperties) {
12576 bool skip_call = false;
Chris Forbesfb06dd62016-10-03 19:14:25 +130012577 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
Chris Forbesa88f31b2016-10-03 17:57:18 +130012578 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
Chris Forbes7ff421e2016-10-03 17:55:48 +130012579 if (physical_device_state) {
12580 if (!pQueueFamilyProperties) {
12581 physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
12584 // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
12585 // get count
Chris Forbes7ff421e2016-10-03 17:55:48 +130012586 if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
Chris Forbesa88f31b2016-10-03 17:57:18 +130012587 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012588 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
12589 "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
12590 "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
12591 "NULL pQueueFamilyProperties to query pCount.");
12592 }
12593 // Then verify that pCount that is passed in on second call matches what was returned
Chris Forbes7ff421e2016-10-03 17:55:48 +130012594 if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012595
12596 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
12597 // provide as warning
Chris Forbesa88f31b2016-10-03 17:57:18 +130012598 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012599 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12600 "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
12601 "supported by this physicalDevice is %u.",
Chris Forbes7ff421e2016-10-03 17:55:48 +130012602 *pCount, physical_device_state->queueFamilyPropertiesCount);
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012603 }
Chris Forbes7ff421e2016-10-03 17:55:48 +130012604 physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012605 }
12606 if (skip_call) {
12607 return;
12608 }
Chris Forbes65724852016-10-03 19:54:31 +130012609 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
Chris Forbes7ff421e2016-10-03 17:55:48 +130012610 if (!pQueueFamilyProperties) {
12611 physical_device_state->queueFamilyPropertiesCount = *pCount;
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012612 }
12613 else { // Save queue family properties
Chris Forbes8c09adb2016-10-03 18:06:20 +130012614 if (physical_device_state->queue_family_properties.size() < *pCount)
12615 physical_device_state->queue_family_properties.resize(*pCount);
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012616 for (uint32_t i = 0; i < *pCount; i++) {
Chris Forbes8c09adb2016-10-03 18:06:20 +130012617 physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012618 }
12619 }
    } else {
Chris Forbesa88f31b2016-10-03 17:57:18 +130012622 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012623 __LINE__, VALIDATION_ERROR_00028, "DL",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070012624 "Invalid physicalDevice (0x%p) passed into vkGetPhysicalDeviceQueueFamilyProperties(). %s", physicalDevice,
12625 validation_error_map[VALIDATION_ERROR_00028]);
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060012626 }
12627}
12628
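// All of the platform-specific CreateXxxSurfaceKHR entry points funnel through this helper; 'fptr' is a pointer-to-member
// naming the matching dispatch-table entry, and on success the new surface is registered in the instance's surface_map.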
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
12634 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12635
12636 // Call down the call chain:
12637 VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
12638
12639 if (result == VK_SUCCESS) {
12640 std::unique_lock<std::mutex> lock(global_lock);
12641 instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
12642 lock.unlock();
12643 }
12644
12645 return result;
12646}
12647
12648VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
12649 bool skip_call = false;
12650 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12651 std::unique_lock<std::mutex> lock(global_lock);
12652 auto surface_state = getSurfaceState(instance_data, surface);
12653
12654 if (surface_state) {
12655 // TODO: track swapchains created from this surface.
12656 instance_data->surface_map.erase(surface);
12657 }
12658 lock.unlock();
12659
12660 if (!skip_call) {
12661 // Call down the call chain:
12662 instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
12663 }
12664}
12665
Norbert Nopper1dec9a52016-11-25 07:55:13 +010012666VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
12667 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12668 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
12669}
12670
Chris Forbesf9f87832016-10-04 17:42:54 +130012671#ifdef VK_USE_PLATFORM_ANDROID_KHR
12672VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
12673 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12674 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
12675}
12676#endif // VK_USE_PLATFORM_ANDROID_KHR
12677
12678#ifdef VK_USE_PLATFORM_MIR_KHR
12679VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
12680 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12681 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
12682}
12683#endif // VK_USE_PLATFORM_MIR_KHR
12684
12685#ifdef VK_USE_PLATFORM_WAYLAND_KHR
12686VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
12687 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
Tobin Ehlisce0dcd22016-10-06 09:11:25 -060012688 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
Chris Forbesf9f87832016-10-04 17:42:54 +130012689}
12690#endif // VK_USE_PLATFORM_WAYLAND_KHR
12691
12692#ifdef VK_USE_PLATFORM_WIN32_KHR
12693VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
12694 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12695 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
12696}
12697#endif // VK_USE_PLATFORM_WIN32_KHR
12698
12699#ifdef VK_USE_PLATFORM_XCB_KHR
12700VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
12701 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12702 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
12703}
12704#endif // VK_USE_PLATFORM_XCB_KHR
12705
12706#ifdef VK_USE_PLATFORM_XLIB_KHR
12707VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
12708 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
12709 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
12710}
12711#endif // VK_USE_PLATFORM_XLIB_KHR
12712
12713
Chris Forbes6c2bc8f2016-10-11 15:57:55 +130012714VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12715 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
12716 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12717
12718 std::unique_lock<std::mutex> lock(global_lock);
12719 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12720 lock.unlock();
12721
12722 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
12723 pSurfaceCapabilities);
12724
12725 if (result == VK_SUCCESS) {
12726 physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
12727 physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
12728 }
12729
12730 return result;
12731}
12732
Chris Forbes97058a62016-10-12 08:55:03 +130012733
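// On success the queried support bit is cached in the surface's gpu_queue_support map, keyed by (physicalDevice,
// queueFamilyIndex), for use by later validation without calling down the chain again.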
12734VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
12735 VkSurfaceKHR surface, VkBool32 *pSupported) {
12736 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12737 std::unique_lock<std::mutex> lock(global_lock);
12738 auto surface_state = getSurfaceState(instance_data, surface);
12739 lock.unlock();
12740
12741 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface,
12742 pSupported);
12743
12744 if (result == VK_SUCCESS) {
Mark Lobodzinski9e023442016-11-23 11:28:30 -070012745 surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
Chris Forbes97058a62016-10-12 08:55:03 +130012746 }
12747
12748 return result;
12749}
12750
Chris Forbesad22fc32016-11-25 13:17:36 +130012751
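// The present-mode and surface-format queries below follow the same two-call idiom as the queue-family query above: a NULL
// array pointer records the count (QUERY_COUNT), a non-NULL pointer records the data (QUERY_DETAILS), and a skipped count
// query or mismatched count produces a warning rather than an error.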
12752VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12753 uint32_t *pPresentModeCount,
12754 VkPresentModeKHR *pPresentModes) {
12755 bool skip_call = false;
12756 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12757 std::unique_lock<std::mutex> lock(global_lock);
12758 // TODO: this isn't quite right. available modes may differ by surface AND physical device.
12759 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12760 auto & call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
12761
12762 if (pPresentModes) {
12763 // Compare the preliminary value of *pPresentModeCount with the value this time:
12764 auto prev_mode_count = (uint32_t) physical_device_state->present_modes.size();
12765 switch (call_state) {
12766 case UNCALLED:
12767 skip_call |= log_msg(
12768 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12769 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes; but no prior positive "
                "value has been seen for pPresentModeCount.");
12772 break;
12773 default:
12774 // both query count and query details
12775 if (*pPresentModeCount != prev_mode_count) {
12776 skip_call |= log_msg(
12777 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12778 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
12779 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs from the value "
12780 "(%u) that was returned when pPresentModes was NULL.",
12781 *pPresentModeCount, prev_mode_count);
12782 }
12783 break;
12784 }
12785 }
12786 lock.unlock();
12787
12788 if (skip_call)
12789 return VK_ERROR_VALIDATION_FAILED_EXT;
12790
12791 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes);
12792
12793 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12794
12795 lock.lock();
12796
12797 if (*pPresentModeCount) {
12798 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12799 if (*pPresentModeCount > physical_device_state->present_modes.size())
12800 physical_device_state->present_modes.resize(*pPresentModeCount);
12801 }
12802 if (pPresentModes) {
12803 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12804 for (uint32_t i = 0; i < *pPresentModeCount; i++) {
12805 physical_device_state->present_modes[i] = pPresentModes[i];
12806 }
12807 }
Chris Forbesad22fc32016-11-25 13:17:36 +130012808 }
12809
12810 return result;
12811}
12812
Chris Forbes11ab1712016-11-25 16:37:41 +130012813
12814VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12815 uint32_t *pSurfaceFormatCount,
12816 VkSurfaceFormatKHR *pSurfaceFormats) {
12817 bool skip_call = false;
12818 auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12819 std::unique_lock<std::mutex> lock(global_lock);
12820 auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
12821 auto & call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
12822
12823 if (pSurfaceFormats) {
12824 auto prev_format_count = (uint32_t) physical_device_state->surface_formats.size();
12825
12826 switch (call_state) {
12827 case UNCALLED:
12828 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't
12829 // previously call this function with a NULL value of pSurfaceFormats:
12830 skip_call |= log_msg(
12831 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12832 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats; but no prior positive "
                "value has been seen for pSurfaceFormatCount.");
12835 break;
12836 default:
12837 if (prev_format_count != *pSurfaceFormatCount) {
12838 skip_call |= log_msg(
12839 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12840 reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with *pSurfaceFormatCount (%u) that differs from the value "
                "(%u) that was returned when pSurfaceFormats was NULL.",
12843 *pSurfaceFormatCount, prev_format_count);
12844 }
12845 break;
12846 }
12847 }
12848 lock.unlock();
12849
12850 if (skip_call)
12851 return VK_ERROR_VALIDATION_FAILED_EXT;
12852
12853 // Call down the call chain:
12854 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
12855 pSurfaceFormats);
12856
12857 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12858
12859 lock.lock();
12860
12861 if (*pSurfaceFormatCount) {
12862 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
12863 if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
12864 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
12865 }
12866 if (pSurfaceFormats) {
12867 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
12868 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
12869 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
12870 }
12871 }
12872 }
12873 return result;
12874}
12875
12876
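// Debug-report callbacks are forwarded down the chain first; only if the next layer/ICD succeeds is the callback also
// registered with this layer's own report_data, under the global lock.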
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012877VKAPI_ATTR VkResult VKAPI_CALL
12878CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
12879 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
Chris Forbes3dd83742016-10-03 19:35:49 +130012880 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
Chris Forbes65724852016-10-03 19:54:31 +130012881 VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012882 if (VK_SUCCESS == res) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012883 std::lock_guard<std::mutex> lock(global_lock);
Chris Forbes3dd83742016-10-03 19:35:49 +130012884 res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012885 }
12886 return res;
12887}
12888
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012889VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
12890 VkDebugReportCallbackEXT msgCallback,
12891 const VkAllocationCallbacks *pAllocator) {
Chris Forbes3dd83742016-10-03 19:35:49 +130012892 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
Chris Forbes65724852016-10-03 19:54:31 +130012893 instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012894 std::lock_guard<std::mutex> lock(global_lock);
Chris Forbes3dd83742016-10-03 19:35:49 +130012895 layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012896}
12897
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012898VKAPI_ATTR void VKAPI_CALL
12899DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
12900 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
Chris Forbes3dd83742016-10-03 19:35:49 +130012901 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
Chris Forbes65724852016-10-03 19:54:31 +130012902 instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012903}
12904
Chia-I Wub02600c2016-05-20 07:11:22 +080012905VKAPI_ATTR VkResult VKAPI_CALL
12906EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
12907 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12908}
12909
12910VKAPI_ATTR VkResult VKAPI_CALL
12911EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
12912 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12913}
12914
12915VKAPI_ATTR VkResult VKAPI_CALL
12916EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
12917 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12918 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
12919
12920 return VK_ERROR_LAYER_NOT_PRESENT;
12921}
12922
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012923VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12924 const char *pLayerName, uint32_t *pCount,
12925 VkExtensionProperties *pProperties) {
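    // When this layer is queried by name it exposes no device extensions of its own; any other query is passed down with a
    // NULL layer name so the ICD reports its implementation's extensions.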
Chia-I Wu3ee80232016-05-06 11:38:37 +080012926 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
Chia-I Wudbe54242016-05-06 11:17:16 +080012927 return util_GetExtensionProperties(0, NULL, pCount, pProperties);
Chia-I Wu3ee80232016-05-06 11:38:37 +080012928
12929 assert(physicalDevice);
12930
Chris Forbes65724852016-10-03 19:54:31 +130012931 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12932 return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
Chia-I Wudbe54242016-05-06 11:17:16 +080012933}
12934
Chia-I Wud5186232016-05-06 11:44:32 +080012935static PFN_vkVoidFunction
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080012936intercept_core_instance_command(const char *name);
12937
12938static PFN_vkVoidFunction
Chia-I Wud5186232016-05-06 11:44:32 +080012939intercept_core_device_command(const char *name);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012940
Chia-I Wu0730d522016-05-06 11:51:11 +080012941static PFN_vkVoidFunction
12942intercept_khr_swapchain_command(const char *name, VkDevice dev);
12943
Chris Forbesf9f87832016-10-04 17:42:54 +130012944static PFN_vkVoidFunction
12945intercept_khr_surface_command(const char *name, VkInstance instance);
12946
Mark Young39389872017-01-19 21:10:49 -070012947static PFN_vkVoidFunction
12948intercept_extension_instance_commands(const char *name, VkInstance instance);
12949
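// GetDeviceProcAddr resolves in order: core device commands, then WSI swapchain commands (gated on the enabled extensions),
// and finally whatever the next layer in the chain returns.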
Chia-I Wud5186232016-05-06 11:44:32 +080012950VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
12951 PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
12952 if (proc)
12953 return proc;
12954
12955 assert(dev);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012956
Chia-I Wu0730d522016-05-06 11:51:11 +080012957 proc = intercept_khr_swapchain_command(funcName, dev);
12958 if (proc)
12959 return proc;
12960
Chris Forbesfb06dd62016-10-03 19:14:25 +130012961 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012962
Chris Forbesaaa9c282016-10-03 20:01:14 +130012963 auto &table = dev_data->dispatch_table;
12964 if (!table.GetDeviceProcAddr)
12965 return nullptr;
12966 return table.GetDeviceProcAddr(dev, funcName);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012967}
12968
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012969VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080012970 PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
Chia-I Wudc6e5a22016-05-06 12:04:23 +080012971 if (!proc)
12972 proc = intercept_core_device_command(funcName);
12973 if (!proc)
12974 proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
Chris Forbesf9f87832016-10-04 17:42:54 +130012975 if (!proc)
12976 proc = intercept_khr_surface_command(funcName, instance);
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080012977 if (proc)
12978 return proc;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012979
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080012980 assert(instance);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012981
Chris Forbes3dd83742016-10-03 19:35:49 +130012982 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
12983 proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080012984 if (proc)
12985 return proc;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012986
Mark Young39389872017-01-19 21:10:49 -070012987 proc = intercept_extension_instance_commands(funcName, instance);
12988 if (proc)
12989 return proc;
12990
Chris Forbesaaa9c282016-10-03 20:01:14 +130012991 auto &table = instance_data->dispatch_table;
12992 if (!table.GetInstanceProcAddr)
12993 return nullptr;
12994 return table.GetInstanceProcAddr(instance, funcName);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012995}
Chia-I Wudbe54242016-05-06 11:17:16 +080012996
Mark Young39389872017-01-19 21:10:49 -070012997VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12998 assert(instance);
12999
13000 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
13001
13002 auto &table = instance_data->dispatch_table;
13003 if (!table.GetPhysicalDeviceProcAddr)
13004 return nullptr;
13005 return table.GetPhysicalDeviceProcAddr(instance, funcName);
13006}
13007
Chia-I Wud5186232016-05-06 11:44:32 +080013008static PFN_vkVoidFunction
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080013009intercept_core_instance_command(const char *name) {
13010 static const struct {
13011 const char *name;
13012 PFN_vkVoidFunction proc;
13013 } core_instance_commands[] = {
13014 { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
Mark Young39389872017-01-19 21:10:49 -070013015 { "vk_layerGetPhysicalDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceProcAddr) },
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080013016 { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
13017 { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
13018 { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
Mark Lobodzinski51695432016-06-27 16:47:24 -060013019 { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
Mark Lobodzinski9b79d7a2016-06-28 10:04:01 -060013020 { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080013021 { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
Chia-I Wub02600c2016-05-20 07:11:22 +080013022 { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
13023 { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
13024 { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
Chia-I Wu8be4b3b2016-05-06 11:55:53 +080013025 { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
13026 };
13027
13028 for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
13029 if (!strcmp(core_instance_commands[i].name, name))
13030 return core_instance_commands[i].proc;
13031 }
13032
13033 return nullptr;
13034}
13035
13036static PFN_vkVoidFunction
Chia-I Wud5186232016-05-06 11:44:32 +080013037intercept_core_device_command(const char *name) {
13038 static const struct {
13039 const char *name;
13040 PFN_vkVoidFunction proc;
13041 } core_device_commands[] = {
Tobin Ehlisd9867fc2016-05-12 16:57:14 -060013042 {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
13043 {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
13044 {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
13045 {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
13046 {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
13047 {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
13048 {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
13049 {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
13050 {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
13051 {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
13052 {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
13053 {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
13054 {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
13055 {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
13056 {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
13057 {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
13058 {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
13059 {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
13060 {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
13061 {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
13062 {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
13063 {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
13064 {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
13065 {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
13066 {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
13067 {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
13068 {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
13069 {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
13070 {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
13071 {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
13072 {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
13073 {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
13074 {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
13075 {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
13076 {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
13077 {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
13078 {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
13079 {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
13080 {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
13081 {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
13082 {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
13083 {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
13084 {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
13085 {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
13086 {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
13087 {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
13088 {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
13089 {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
13090 {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
13091 {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
13092 {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
13093 {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
13094 {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
13095 {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
13096 {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
13097 {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
13098 {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
13099 {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
13100 {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
13101 {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
13102 {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
13103 {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
13104 {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
13105 {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
13106 {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
13107 {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
13108 {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
13109 {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
13110 {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
13111 {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
13112 {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
13113 {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
13114 {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
13115 {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
13116 {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
13117 {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
13118 {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
13119 {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
13120 {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
13121 {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
13122 {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
13123 {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
13124 {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
13125 {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
13126 {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
13127 {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
13128 {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
13129 {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
13130 {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
13131 {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
13132 {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
13133 {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
13134 {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
13135 {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
13136 {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
13137 {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
13138 {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
13139 {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
13140 {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
13141 {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
13142 {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
13143 {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
13144 {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
13145 {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
13146 {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
13147 {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
13148 {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
13149 {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
13150 {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
13151 {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
13152 {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
13153 {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
13154 {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
13155 {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
13156 {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
13157 {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
Chia-I Wud5186232016-05-06 11:44:32 +080013158 };
13159
13160 for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
13161 if (!strcmp(core_device_commands[i].name, name))
13162 return core_device_commands[i].proc;
13163 }
13164
13165 return nullptr;
13166}
13167
Chia-I Wu0730d522016-05-06 11:51:11 +080013168static PFN_vkVoidFunction
13169intercept_khr_swapchain_command(const char *name, VkDevice dev) {
13170 static const struct {
13171 const char *name;
13172 PFN_vkVoidFunction proc;
13173 } khr_swapchain_commands[] = {
13174 { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
13175 { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
13176 { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
13177 { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
13178 { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
13179 };
Mark Young1a867442016-07-01 15:18:27 -060013180 layer_data *dev_data = nullptr;
Chia-I Wu0730d522016-05-06 11:51:11 +080013181
Chia-I Wudc6e5a22016-05-06 12:04:23 +080013182 if (dev) {
Mark Young1a867442016-07-01 15:18:27 -060013183 dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
Chia-I Wudc6e5a22016-05-06 12:04:23 +080013184 if (!dev_data->device_extensions.wsi_enabled)
13185 return nullptr;
13186 }
Chia-I Wu0730d522016-05-06 11:51:11 +080013187
13188 for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
13189 if (!strcmp(khr_swapchain_commands[i].name, name))
13190 return khr_swapchain_commands[i].proc;
13191 }
13192
Mark Young1a867442016-07-01 15:18:27 -060013193 if (dev_data) {
13194 if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
13195 return nullptr;
13196 }
13197
13198 if (!strcmp("vkCreateSharedSwapchainsKHR", name))
13199 return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);
13200
Chia-I Wu0730d522016-05-06 11:51:11 +080013201 return nullptr;
13202}
13203
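// Each surface-related command is paired with the instance_layer_data flag for the extension that introduces it, so a
// command is only handed out when the corresponding extension was actually enabled on the instance.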
Chris Forbesf9f87832016-10-04 17:42:54 +130013204static PFN_vkVoidFunction
13205intercept_khr_surface_command(const char *name, VkInstance instance) {
13206 static const struct {
13207 const char *name;
13208 PFN_vkVoidFunction proc;
13209 bool instance_layer_data::*enable;
13210 } khr_surface_commands[] = {
13211#ifdef VK_USE_PLATFORM_ANDROID_KHR
13212 {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
13213 &instance_layer_data::androidSurfaceExtensionEnabled},
13214#endif // VK_USE_PLATFORM_ANDROID_KHR
13215#ifdef VK_USE_PLATFORM_MIR_KHR
13216 {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
13217 &instance_layer_data::mirSurfaceExtensionEnabled},
13218#endif // VK_USE_PLATFORM_MIR_KHR
13219#ifdef VK_USE_PLATFORM_WAYLAND_KHR
13220 {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
13221 &instance_layer_data::waylandSurfaceExtensionEnabled},
13222#endif // VK_USE_PLATFORM_WAYLAND_KHR
13223#ifdef VK_USE_PLATFORM_WIN32_KHR
13224 {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
13225 &instance_layer_data::win32SurfaceExtensionEnabled},
13226#endif // VK_USE_PLATFORM_WIN32_KHR
13227#ifdef VK_USE_PLATFORM_XCB_KHR
13228 {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
13229 &instance_layer_data::xcbSurfaceExtensionEnabled},
13230#endif // VK_USE_PLATFORM_XCB_KHR
13231#ifdef VK_USE_PLATFORM_XLIB_KHR
13232 {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
13233 &instance_layer_data::xlibSurfaceExtensionEnabled},
13234#endif // VK_USE_PLATFORM_XLIB_KHR
Norbert Nopper1dec9a52016-11-25 07:55:13 +010013235 { "vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
13236 &instance_layer_data::displayExtensionEnabled},
Chris Forbesf9f87832016-10-04 17:42:54 +130013237 {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
13238 &instance_layer_data::surfaceExtensionEnabled},
Chris Forbes6c2bc8f2016-10-11 15:57:55 +130013239 {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
13240 &instance_layer_data::surfaceExtensionEnabled},
Chris Forbes97058a62016-10-12 08:55:03 +130013241 {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
13242 &instance_layer_data::surfaceExtensionEnabled},
Chris Forbesad22fc32016-11-25 13:17:36 +130013243 {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
13244 &instance_layer_data::surfaceExtensionEnabled},
Chris Forbes11ab1712016-11-25 16:37:41 +130013245 {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
13246 &instance_layer_data::surfaceExtensionEnabled},
Chris Forbesf9f87832016-10-04 17:42:54 +130013247 };
13248
13249 instance_layer_data *instance_data = nullptr;
13250 if (instance) {
13251 instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
13252 }
13253
13254 for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
13255 if (!strcmp(khr_surface_commands[i].name, name)) {
13256 if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
13257 return nullptr;
13258 return khr_surface_commands[i].proc;
13259 }
13260 }
13261
13262 return nullptr;
13263}
13264
Mark Young39389872017-01-19 21:10:49 -070013265static PFN_vkVoidFunction
13266intercept_extension_instance_commands(const char *name, VkInstance instance) {
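    // No extension-specific instance commands are intercepted yet; this hook gives GetInstanceProcAddr a single place to
    // grow when such commands are added.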
13267 return NULL;
13268}
13269
Chia-I Wua67f6842016-05-06 11:20:20 +080013270} // namespace core_validation
13271
13272// vk_layer_logging.h expects these to be defined
13273
13274VKAPI_ATTR VkResult VKAPI_CALL
13275vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
13276 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
Chia-I Wu629d7cd2016-05-06 11:32:54 +080013277 return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
Chia-I Wua67f6842016-05-06 11:20:20 +080013278}
13279
13280VKAPI_ATTR void VKAPI_CALL
13281vkDestroyDebugReportCallbackEXT(VkInstance instance,
13282 VkDebugReportCallbackEXT msgCallback,
13283 const VkAllocationCallbacks *pAllocator) {
Chia-I Wu629d7cd2016-05-06 11:32:54 +080013284 core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
Chia-I Wua67f6842016-05-06 11:20:20 +080013285}
13286
13287VKAPI_ATTR void VKAPI_CALL
13288vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
13289 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
Chia-I Wu629d7cd2016-05-06 11:32:54 +080013290 core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
Chia-I Wua67f6842016-05-06 11:20:20 +080013291}
13292
// loader-layer interface v0, just wrappers since there is only one layer in this library
Chia-I Wua67f6842016-05-06 11:20:20 +080013294
Chia-I Wudbe54242016-05-06 11:17:16 +080013295VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13296vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
Chia-I Wub02600c2016-05-20 07:11:22 +080013297 return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
Chia-I Wudbe54242016-05-06 11:17:16 +080013298}
13299
13300VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13301vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
Chia-I Wub02600c2016-05-20 07:11:22 +080013302 return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
Chia-I Wudbe54242016-05-06 11:17:16 +080013303}
13304
13305VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
13306vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
Chia-I Wub02600c2016-05-20 07:11:22 +080013307 // the layer command handles VK_NULL_HANDLE just fine internally
13308 assert(physicalDevice == VK_NULL_HANDLE);
13309 return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
Chia-I Wua67f6842016-05-06 11:20:20 +080013310}
13311
13312VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
13313 const char *pLayerName, uint32_t *pCount,
13314 VkExtensionProperties *pProperties) {
Chia-I Wub02600c2016-05-20 07:11:22 +080013315 // the layer command handles VK_NULL_HANDLE just fine internally
13316 assert(physicalDevice == VK_NULL_HANDLE);
Chia-I Wu3ee80232016-05-06 11:38:37 +080013317 return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
Chia-I Wua67f6842016-05-06 11:20:20 +080013318}
13319
13320VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
Chia-I Wu629d7cd2016-05-06 11:32:54 +080013321 return core_validation::GetDeviceProcAddr(dev, funcName);
Chia-I Wua67f6842016-05-06 11:20:20 +080013322}
13323
13324VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
Chia-I Wu629d7cd2016-05-06 11:32:54 +080013325 return core_validation::GetInstanceProcAddr(instance, funcName);
Chia-I Wudbe54242016-05-06 11:17:16 +080013326}
Mark Young39389872017-01-19 21:10:49 -070013327
13328VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
13329 return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
13330}
13331
13332VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
13333 assert(pVersionStruct != NULL);
13334 assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
13335
13336 // Fill in the function pointers if our version is at least capable of having the structure contain them.
    // Fill in the function pointers if the loader's interface version is new enough for the structure to contain them.
13338 pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
13339 pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
13340 pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
13341 }
13342
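    // Settle on the lower of the two interface versions: remember an older loader's version, or clamp the struct back to
    // this layer's maximum so the loader knows what was actually negotiated.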
13343 if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
13344 core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
13345 } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
13346 pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
13347 }
13348
13349 return VK_SUCCESS;
13350}
13351