/* Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (C) 2015-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 */
#ifndef VULKAN_SHADER_VALIDATION_H
#define VULKAN_SHADER_VALIDATION_H

// Standard headers for the types used below (vector, string, unordered containers, C string/assert helpers)
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <vulkan/vulkan.h>

#include <SPIRV/spirv.hpp>
#include <generated/spirv_tools_commit_id.h>
#include "spirv-tools/optimizer.hpp"

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() const {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() const { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) const {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() const { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) const { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) const { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
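
// Example (illustrative only): walking a raw SPIR-V word stream with spirv_inst_iter,
// skipping the 5-word module header and inspecting each instruction:
//
//   std::vector<uint32_t> const &words = ...;  // some SPIR-V module
//   auto insn = spirv_inst_iter(words.begin(), words.begin() + 5);
//   auto insn_end = spirv_inst_iter(words.begin(), words.end());
//   for (; insn != insn_end; ++insn) {
//       if (insn.opcode() == spv::OpEntryPoint) {
//           // insn.word(1) is the execution model, insn.word(2) the entry point <id>
//       }
//   }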

// Decorations observed on a single SPIR-V <id>. `flags` records which decorations were
// present; the remaining fields hold the operand of each decoration that carries one
// (Location, Component, InputAttachmentIndex, DescriptorSet, Binding, BuiltIn).
struct decoration_set {
    enum {
        location_bit = 1 << 0,
        patch_bit = 1 << 1,
        relaxed_precision_bit = 1 << 2,
        block_bit = 1 << 3,
        buffer_block_bit = 1 << 4,
        component_bit = 1 << 5,
        input_attachment_index_bit = 1 << 6,
        descriptor_set_bit = 1 << 7,
        binding_bit = 1 << 8,
        nonwritable_bit = 1 << 9,
        builtin_bit = 1 << 10,
    };
    uint32_t flags = 0;
    uint32_t location = static_cast<uint32_t>(-1);
    uint32_t component = 0;
    uint32_t input_attachment_index = 0;
    uint32_t descriptor_set = 0;
    uint32_t binding = 0;
    uint32_t builtin = static_cast<uint32_t>(-1);

    // Fold the decorations recorded in `other` into this set.
    void merge(decoration_set const &other) {
        if (other.flags & location_bit) location = other.location;
        if (other.flags & component_bit) component = other.component;
        if (other.flags & input_attachment_index_bit) input_attachment_index = other.input_attachment_index;
        if (other.flags & descriptor_set_bit) descriptor_set = other.descriptor_set;
        if (other.flags & binding_bit) binding = other.binding;
        if (other.flags & builtin_bit) builtin = other.builtin;
        flags |= other.flags;
    }

    // Record one decoration and, where the decoration carries one, its operand value.
    void add(uint32_t decoration, uint32_t value);
};

struct SHADER_MODULE_STATE {
    // The SPIR-V binary itself
    std::vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. This is useful because walking type
    // trees, constant expressions, etc. requires jumping all over the instruction stream.
    std::unordered_map<unsigned, unsigned> def_index;
    // A mapping of <id> to the decorations observed on it
    std::unordered_map<unsigned, decoration_set> decorations;
    struct EntryPoint {
        uint32_t offset;
        VkShaderStageFlags stage;
    };
    std::unordered_multimap<std::string, EntryPoint> entry_points;
    bool has_valid_spirv;
    VkShaderModule vk_shader_module;
    uint32_t gpu_validation_shader_id;

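    // Example (illustrative only) of the group decorations that PreprocessShaderBinary()
    // below rewrites into direct decorations, so that later passes never see groups:
    //
    //          OpDecorate %grp Location 0
    //   %grp = OpDecorationGroup
    //          OpGroupDecorate %grp %a %b
    //
    // After the flatten-decoration pass this becomes plain "OpDecorate %a Location 0"
    // and "OpDecorate %b Location 0" instructions.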
    std::vector<uint32_t> PreprocessShaderBinary(uint32_t *src_binary, size_t binary_size, spv_target_env env) {
        std::vector<uint32_t> src(src_binary, src_binary + binary_size / sizeof(uint32_t));

        // Check if there are any group decoration instructions, and flatten them if found.
        bool has_group_decoration = false;
        bool done = false;

        // Walk through the first part of the SPIR-V module, looking for group decoration instructions.
        // Skip the header (5 words).
        auto itr = spirv_inst_iter(src.begin(), src.begin() + 5);
        auto itrend = spirv_inst_iter(src.begin(), src.end());
        while (itr != itrend && !done) {
            spv::Op opcode = (spv::Op)itr.opcode();
            switch (opcode) {
                case spv::OpDecorationGroup:
                case spv::OpGroupDecorate:
                case spv::OpGroupMemberDecorate:
                    has_group_decoration = true;
                    done = true;
                    break;
                case spv::OpFunction:
                    // An OpFunction indicates there are no more decorations
                    done = true;
                    break;
                default:
                    break;
            }
            itr++;
        }

        if (has_group_decoration) {
            spvtools::Optimizer optimizer(env);
            optimizer.RegisterPass(spvtools::CreateFlattenDecorationPass());
            std::vector<uint32_t> optimized_binary;
            // Run optimizer to flatten decorations only, set skip_validation so as to not re-run the validator
            auto result =
                optimizer.Run(src_binary, binary_size / sizeof(uint32_t), &optimized_binary, spvtools::ValidatorOptions(), true);
            if (result) {
                return optimized_binary;
            }
        }
        // Return the original module.
        return src;
    }

    SHADER_MODULE_STATE(VkShaderModuleCreateInfo const *pCreateInfo, VkShaderModule shaderModule, spv_target_env env,
                        uint32_t unique_shader_id)
        : words(PreprocessShaderBinary((uint32_t *)pCreateInfo->pCode, pCreateInfo->codeSize, env)),
          def_index(),
          has_valid_spirv(true),
          vk_shader_module(shaderModule),
          gpu_validation_shader_id(unique_shader_id) {
        BuildDefIndex();
    }

    SHADER_MODULE_STATE() : has_valid_spirv(false), vk_shader_module(VK_NULL_HANDLE) {}

    decoration_set get_decorations(unsigned id) const {
        // Return the actual decorations for this id, or a default set.
        auto it = decorations.find(id);
        if (it != decorations.end()) return it->second;
        return decoration_set();
    }

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }

    void BuildDefIndex();
};

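// Example (illustrative only): typical queries against a SHADER_MODULE_STATE `module`
// for a variable <id> discovered while walking the instruction stream:
//
//   auto decorations = module.get_decorations(var_id);
//   if (decorations.flags & decoration_set::descriptor_set_bit) {
//       uint32_t set = decorations.descriptor_set;
//       uint32_t binding = decorations.binding;
//   }
//   auto def = module.get_def(var_id);  // e.g. the OpVariable instruction, or end()
//   if (def != module.end() && def.opcode() == spv::OpVariable) {
//       auto type = module.get_def(def.word(1));  // definition of the variable's result type
//   }

// Serialized layout of a validation cache blob (VK_EXT_validation_cache), as read by
// Load() and produced by Write() below:
//   uint32_t               header size in bytes (2 * sizeof(uint32_t) + VK_UUID_SIZE)
//   uint32_t               VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT
//   uint8_t[VK_UUID_SIZE]  UUID derived from SPIRV_TOOLS_COMMIT_ID
//   uint32_t[]             one hash per shader that previously passed validation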
class ValidationCache {
    // Hashes of shaders that have passed validation before, and can be skipped.
    // We don't store negative results, as we would have to also store what was
    // wrong with them; also, we expect they will get fixed, so we're less
    // likely to see them again.
    std::unordered_set<uint32_t> good_shader_hashes;
    ValidationCache() {}

  public:
    static VkValidationCacheEXT Create(VkValidationCacheCreateInfoEXT const *pCreateInfo) {
        auto cache = new ValidationCache();
        cache->Load(pCreateInfo);
        return VkValidationCacheEXT(cache);
    }

    void Load(VkValidationCacheCreateInfoEXT const *pCreateInfo) {
        const auto headerSize = 2 * sizeof(uint32_t) + VK_UUID_SIZE;
        auto size = headerSize;
        if (!pCreateInfo->pInitialData || pCreateInfo->initialDataSize < size) return;

        uint32_t const *data = (uint32_t const *)pCreateInfo->pInitialData;
        if (data[0] != size) return;
        if (data[1] != VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT) return;
        uint8_t expected_uuid[VK_UUID_SIZE];
        Sha1ToVkUuid(SPIRV_TOOLS_COMMIT_ID, expected_uuid);
        if (memcmp(&data[2], expected_uuid, VK_UUID_SIZE) != 0) return;  // different version

        data = (uint32_t const *)(reinterpret_cast<uint8_t const *>(data) + headerSize);

        for (; size < pCreateInfo->initialDataSize; data++, size += sizeof(uint32_t)) {
            good_shader_hashes.insert(*data);
        }
    }

    void Write(size_t *pDataSize, void *pData) {
        const auto headerSize = 2 * sizeof(uint32_t) + VK_UUID_SIZE;  // 4 bytes for header size + 4 bytes for version number + UUID
        if (!pData) {
            *pDataSize = headerSize + good_shader_hashes.size() * sizeof(uint32_t);
            return;
        }

        if (*pDataSize < headerSize) {
            *pDataSize = 0;
            return;  // Too small for even the header!
        }

        uint32_t *out = (uint32_t *)pData;
        size_t actualSize = headerSize;

        // Write the header
        *out++ = headerSize;
        *out++ = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT;
        Sha1ToVkUuid(SPIRV_TOOLS_COMMIT_ID, reinterpret_cast<uint8_t *>(out));
        out = (uint32_t *)(reinterpret_cast<uint8_t *>(out) + VK_UUID_SIZE);

        for (auto it = good_shader_hashes.begin(); it != good_shader_hashes.end() && actualSize < *pDataSize;
             it++, out++, actualSize += sizeof(uint32_t)) {
            *out = *it;
        }

        *pDataSize = actualSize;
    }

    void Merge(ValidationCache const *other) {
        good_shader_hashes.reserve(good_shader_hashes.size() + other->good_shader_hashes.size());
        for (auto h : other->good_shader_hashes) good_shader_hashes.insert(h);
    }

    static uint32_t MakeShaderHash(VkShaderModuleCreateInfo const *smci);

    bool Contains(uint32_t hash) { return good_shader_hashes.count(hash) != 0; }

    void Insert(uint32_t hash) { good_shader_hashes.insert(hash); }

  private:
    void Sha1ToVkUuid(const char *sha1_str, uint8_t uuid[VK_UUID_SIZE]) {
        // Convert sha1_str from a hex string to binary. We only need VK_UUID_SIZE bytes of
        // output, so pad with zeroes if the input string is shorter than 2 * VK_UUID_SIZE
        // hex characters, and truncate if it's longer.
        char padded_sha1_str[2 * VK_UUID_SIZE + 1] = {};
        strncpy(padded_sha1_str, sha1_str, 2 * VK_UUID_SIZE + 1);
        char byte_str[3] = {};
        for (uint32_t i = 0; i < VK_UUID_SIZE; ++i) {
            byte_str[0] = padded_sha1_str[2 * i + 0];
            byte_str[1] = padded_sha1_str[2 * i + 1];
            uuid[i] = static_cast<uint8_t>(strtol(byte_str, NULL, 16));
        }
    }
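
    // For reference (illustrative, with a made-up commit id): Sha1ToVkUuid("9bd3f5b1...")
    // fills uuid with {0x9b, 0xd3, 0xf5, 0xb1, ...}; an id shorter than 2 * VK_UUID_SIZE
    // hex characters is zero-padded.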
};
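
// Example (illustrative only; `cache`, `pCreateInfo`, and `spirv_valid` are hypothetical
// names): how a caller might consult the cache to skip re-validating known-good SPIR-V:
//
//   uint32_t hash = ValidationCache::MakeShaderHash(pCreateInfo);
//   if (!cache->Contains(hash)) {
//       // ... run full SPIR-V validation on pCreateInfo->pCode ...
//       if (spirv_valid) cache->Insert(hash);
//   }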

#endif  // VULKAN_SHADER_VALIDATION_H