blob: 835a33414740f3544d929117d13642b28add95ea [file] [log] [blame]
Chris Forbesaab9d112015-04-02 13:22:31 +13001/*
2 * Vulkan
3 *
4 * Copyright (C) 2015 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <map>
#include <unordered_map>
#include <vector>
#include "loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vkLayer.h"
#include "layers_config.h"
#include "layers_msg.h"
// The following is #included again to catch certain OS-specific functions
// being used:
#include "loader_platform.h"

#include "SPIRV/spirv.h"
41
Chris Forbesaab9d112015-04-02 13:22:31 +130042
/* Per-object dispatch tables: maps a wrapped GPU/device object to the table
 * used to forward calls to the next layer (or the driver). */
static std::unordered_map<void *, VkLayerDispatchTable *> tableMap;
/* Guards one-time layer option initialization (see initLayer). */
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(g_initOnce);
Chris Forbes4396ff52015-04-08 10:11:59 +120045
Chris Forbes1bb5a2e2015-04-10 11:41:20 +120046
47static void
48build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index)
49{
50 unsigned int const *code = (unsigned int const *)&words[0];
51 size_t size = words.size();
52
53 unsigned word = 5;
54 while (word < size) {
55 unsigned opcode = code[word] & 0x0ffffu;
56 unsigned oplen = (code[word] & 0xffff0000u) >> 16;
57
58 switch (opcode) {
59 case spv::OpTypeVoid:
60 case spv::OpTypeBool:
61 case spv::OpTypeInt:
62 case spv::OpTypeFloat:
63 case spv::OpTypeVector:
64 case spv::OpTypeMatrix:
65 case spv::OpTypeSampler:
66 case spv::OpTypeFilter:
67 case spv::OpTypeArray:
68 case spv::OpTypeRuntimeArray:
69 case spv::OpTypeStruct:
70 case spv::OpTypeOpaque:
71 case spv::OpTypePointer:
72 case spv::OpTypeFunction:
73 case spv::OpTypeEvent:
74 case spv::OpTypeDeviceEvent:
75 case spv::OpTypeReserveId:
76 case spv::OpTypeQueue:
77 case spv::OpTypePipe:
78 type_def_index[code[word+1]] = word;
79 break;
80
81 default:
82 /* We only care about type definitions */
83 break;
84 }
85
86 word += oplen;
87 }
88}
89
/* Everything this layer retains about one created shader: a private copy of
 * its SPIRV plus an index for fast type lookups during validation. */
struct shader_source {
    /* the spirv image itself */
    std::vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees requires jumping all over the instruction stream.
     */
    std::unordered_map<unsigned, unsigned> type_def_index;

    /* Copies codeSize bytes of SPIRV out of the create info (the app may free
     * its buffer after vkCreateShader returns) and builds the type index. */
    shader_source(VkShaderCreateInfo const *pCreateInfo) :
        words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)) {

        build_type_def_index(words, type_def_index);
    }
};
104
105
/* Maps a shader handle to our retained SPIRV analysis; populated in
 * vkCreateShader, consulted in vkCreateGraphicsPipeline. */
static std::unordered_map<void *, shader_source *> shader_map;
107
108
Chris Forbes1b466bd2015-04-15 06:59:41 +1200109static void
110initLayer()
111{
112 const char *strOpt;
113 // initialize ShaderChecker options
114 getLayerOptionEnum("ShaderCheckerReportLevel", (uint32_t *) &g_reportingLevel);
115 g_actionIsDefault = getLayerOptionEnum("ShaderCheckerDebugAction", (uint32_t *) &g_debugAction);
116
117 if (g_debugAction & VK_DBG_LAYER_ACTION_LOG_MSG)
118 {
119 strOpt = getLayerOption("ShaderCheckerLogFilename");
120 if (strOpt)
121 {
122 g_logFile = fopen(strOpt, "w");
123 }
124 if (g_logFile == NULL)
125 g_logFile = stdout;
126 }
127}
128
129
Chris Forbesaab9d112015-04-02 13:22:31 +1300130static VkLayerDispatchTable * initLayerTable(const VkBaseLayerObject *gpuw)
131{
132 VkLayerDispatchTable *pTable;
133
134 assert(gpuw);
135 std::unordered_map<void *, VkLayerDispatchTable *>::const_iterator it = tableMap.find((void *) gpuw->baseObject);
136 if (it == tableMap.end())
137 {
138 pTable = new VkLayerDispatchTable;
139 tableMap[(void *) gpuw->baseObject] = pTable;
140 } else
141 {
142 return it->second;
143 }
144
145 layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);
146
147 return pTable;
148}
149
150
151VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
152{
153 VkLayerDispatchTable* pTable = tableMap[gpu];
154 VkResult result = pTable->CreateDevice(gpu, pCreateInfo, pDevice);
Chris Forbes1b466bd2015-04-15 06:59:41 +1200155
156 loader_platform_thread_once(&g_initOnce, initLayer);
Chris Forbesaab9d112015-04-02 13:22:31 +1300157 // create a mapping for the device object into the dispatch table
158 tableMap.emplace(*pDevice, pTable);
159 return result;
160}
161
162
163VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
164{
165 if (pOutLayerCount == NULL || pOutLayers == NULL || pOutLayers[0] == NULL || pOutLayers[1] == NULL || pReserved == NULL)
166 return VK_ERROR_INVALID_POINTER;
167
168 if (maxLayerCount < 1)
169 return VK_ERROR_INITIALIZATION_FAILED;
170 *pOutLayerCount = 1;
171 strncpy((char *) pOutLayers[0], "ShaderChecker", maxStringSize);
172 return VK_SUCCESS;
173}
174
175
/* Descriptor for one extension exposed by this layer. */
struct extProps {
    uint32_t version;
    const char * const name;
};
/* Number of entries in shaderCheckerExts below. */
#define SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE 1
static const struct extProps shaderCheckerExts[SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE] = {
    // TODO what is the version?
    0x10, "ShaderChecker",
};
185
186
187VK_LAYER_EXPORT VkResult VKAPI vkGetGlobalExtensionInfo(
188 VkExtensionInfoType infoType,
189 uint32_t extensionIndex,
190 size_t* pDataSize,
191 void* pData)
192{
193 VkResult result;
194
195 /* This entrypoint is NOT going to init it's own dispatch table since loader calls here early */
196 VkExtensionProperties *ext_props;
197 uint32_t *count;
198
199 if (pDataSize == NULL)
200 return VK_ERROR_INVALID_POINTER;
201
202 switch (infoType) {
203 case VK_EXTENSION_INFO_TYPE_COUNT:
204 *pDataSize = sizeof(uint32_t);
205 if (pData == NULL)
206 return VK_SUCCESS;
207 count = (uint32_t *) pData;
208 *count = SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE;
209 break;
210 case VK_EXTENSION_INFO_TYPE_PROPERTIES:
211 *pDataSize = sizeof(VkExtensionProperties);
212 if (pData == NULL)
213 return VK_SUCCESS;
214 if (extensionIndex >= SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE)
215 return VK_ERROR_INVALID_VALUE;
216 ext_props = (VkExtensionProperties *) pData;
217 ext_props->version = shaderCheckerExts[extensionIndex].version;
218 strncpy(ext_props->extName, shaderCheckerExts[extensionIndex].name,
219 VK_MAX_EXTENSION_NAME);
220 ext_props->extName[VK_MAX_EXTENSION_NAME - 1] = '\0';
221 break;
222 default:
223 return VK_ERROR_INVALID_VALUE;
224 };
225
226 return VK_SUCCESS;
227}
228
229
Chris Forbes1bb5a2e2015-04-10 11:41:20 +1200230static char const *
231storage_class_name(unsigned sc)
232{
233 switch (sc) {
234 case spv::StorageInput: return "input";
235 case spv::StorageOutput: return "output";
236 case spv::StorageConstantUniform: return "const uniform";
237 case spv::StorageUniform: return "uniform";
238 case spv::StorageWorkgroupLocal: return "workgroup local";
239 case spv::StorageWorkgroupGlobal: return "workgroup global";
240 case spv::StoragePrivateGlobal: return "private global";
241 case spv::StorageFunction: return "function";
242 case spv::StorageGeneric: return "generic";
243 case spv::StoragePrivate: return "private";
244 case spv::StorageAtomicCounter: return "atomic counter";
245 default: return "unknown";
246 }
247}
248
249
250/* returns ptr to null terminator */
251static char *
252describe_type(char *dst, shader_source const *src, unsigned type)
253{
254 auto type_def_it = src->type_def_index.find(type);
255
256 if (type_def_it == src->type_def_index.end()) {
257 return dst + sprintf(dst, "undef");
258 }
259
260 unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
261 unsigned opcode = code[0] & 0x0ffffu;
262 switch (opcode) {
263 case spv::OpTypeBool:
264 return dst + sprintf(dst, "bool");
265 case spv::OpTypeInt:
266 return dst + sprintf(dst, "%cint%d", code[3] ? 's' : 'u', code[2]);
267 case spv::OpTypeFloat:
268 return dst + sprintf(dst, "float%d", code[2]);
269 case spv::OpTypeVector:
270 dst += sprintf(dst, "vec%d of ", code[3]);
271 return describe_type(dst, src, code[2]);
272 case spv::OpTypeMatrix:
273 dst += sprintf(dst, "mat%d of ", code[3]);
274 return describe_type(dst, src, code[2]);
275 case spv::OpTypeArray:
276 dst += sprintf(dst, "arr[%d] of ", code[3]);
277 return describe_type(dst, src, code[2]);
278 case spv::OpTypePointer:
279 dst += sprintf(dst, "ptr to %s ", storage_class_name(code[2]));
280 return describe_type(dst, src, code[3]);
281 case spv::OpTypeStruct:
282 {
283 unsigned oplen = code[0] >> 16;
284 dst += sprintf(dst, "struct of (");
285 for (int i = 2; i < oplen; i++) {
286 dst = describe_type(dst, src, code[i]);
287 dst += sprintf(dst, i == oplen-1 ? ")" : ", ");
288 }
289 return dst;
290 }
291 default:
292 return dst + sprintf(dst, "oddtype");
293 }
294}
295
296
297static bool
298types_match(shader_source const *a, shader_source const *b, unsigned a_type, unsigned b_type)
299{
300 auto a_type_def_it = a->type_def_index.find(a_type);
301 auto b_type_def_it = b->type_def_index.find(b_type);
302
303 if (a_type_def_it == a->type_def_index.end()) {
304 printf("ERR: can't find def for type %d in producing shader %p; SPIRV probably invalid.\n",
305 a_type, a);
306 return false;
307 }
308
309 if (b_type_def_it == b->type_def_index.end()) {
310 printf("ERR: can't find def for type %d in consuming shader %p; SPIRV probably invalid.\n",
311 b_type, b);
312 return false;
313 }
314
315 /* walk two type trees together, and complain about differences */
316 unsigned int const *a_code = (unsigned int const *)&a->words[a_type_def_it->second];
317 unsigned int const *b_code = (unsigned int const *)&b->words[b_type_def_it->second];
318
319 unsigned a_opcode = a_code[0] & 0x0ffffu;
320 unsigned b_opcode = b_code[0] & 0x0ffffu;
321
322 if (a_opcode != b_opcode) {
323 printf(" - FAIL: type def opcodes differ: %d vs %d\n", a_opcode, b_opcode);
324 return false;
325 }
326
327 switch (a_opcode) {
328 case spv::OpTypeBool:
329 return true;
330 case spv::OpTypeInt:
331 /* match on width, signedness */
332 return a_code[2] == b_code[2] && a_code[3] == b_code[3];
333 case spv::OpTypeFloat:
334 /* match on width */
335 return a_code[2] == b_code[2];
336 case spv::OpTypeVector:
337 case spv::OpTypeMatrix:
338 case spv::OpTypeArray:
339 /* match on element type, count. these all have the same layout */
340 return types_match(a, b, a_code[2], b_code[2]) && a_code[3] == b_code[3];
341 case spv::OpTypeStruct:
342 /* match on all element types */
343 {
344 unsigned a_len = a_code[0] >> 16;
345 unsigned b_len = b_code[0] >> 16;
346
347 if (a_len != b_len) {
348 return false; /* structs cannot match if member counts differ */
349 }
350
351 for (int i = 2; i < a_len; i++) {
352 if (!types_match(a, b, a_code[i], b_code[i])) {
353 return false;
354 }
355 }
356
357 return true;
358 }
359 case spv::OpTypePointer:
360 /* match on pointee type. storage class is expected to differ */
361 return types_match(a, b, a_code[3], b_code[3]);
362
363 default:
364 /* remaining types are CLisms, or may not appear in the interfaces we
365 * are interested in. Just claim no match.
366 */
367 return false;
368
369 }
370}
371
372
/* Looks up `id` in `map`; returns the mapped value, or `def` when absent. */
static int
value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def)
{
    auto found = map.find(id);
    return (found == map.end()) ? def : static_cast<int>(found->second);
}
382
383
/* One variable in a shader stage's input or output interface. */
struct interface_var {
    uint32_t id;        /* SPIRV result <id> of the OpVariable */
    uint32_t type_id;   /* <id> of the variable's type */
    /* TODO: collect the name, too? Isn't required to be present. */
};
389
390
391static void
392collect_interface_by_location(shader_source const *src, spv::StorageClass interface,
393 std::map<uint32_t, interface_var> &out,
394 std::map<uint32_t, interface_var> &builtins_out)
395{
396 unsigned int const *code = (unsigned int const *)&src->words[0];
397 size_t size = src->words.size();
398
399 if (code[0] != spv::MagicNumber) {
400 printf("Invalid magic.\n");
401 return;
402 }
403
404 std::unordered_map<unsigned, unsigned> var_locations;
405 std::unordered_map<unsigned, unsigned> var_builtins;
406
407 unsigned word = 5;
408 while (word < size) {
409
410 unsigned opcode = code[word] & 0x0ffffu;
411 unsigned oplen = (code[word] & 0xffff0000u) >> 16;
412
413 /* We consider two interface models: SSO rendezvous-by-location, and
414 * builtins. Complain about anything that fits neither model.
415 */
416 if (opcode == spv::OpDecorate) {
417 if (code[word+2] == spv::DecLocation) {
418 var_locations[code[word+1]] = code[word+3];
419 }
420
421 if (code[word+2] == spv::DecBuiltIn) {
422 var_builtins[code[word+1]] = code[word+3];
423 }
424 }
425
426 /* TODO: handle grouped decorations */
427 /* TODO: handle index=1 dual source outputs from FS -- two vars will
428 * have the same location, and we DONT want to clobber. */
429
430 if (opcode == spv::OpVariable && code[word+3] == interface) {
431 int location = value_or_default(var_locations, code[word+2], -1);
432 int builtin = value_or_default(var_builtins, code[word+2], -1);
433
434 if (location == -1 && builtin == -1) {
435 /* No location defined, and not bound to an API builtin.
436 * The spec says nothing about how this case works (or doesn't)
437 * for interface matching.
438 */
439 printf("WARN: var %d (type %d) in %s interface has no Location or Builtin decoration\n",
440 code[word+2], code[word+1], interface == spv::StorageInput ? "input" : "output");
441 }
442 else if (location != -1) {
443 /* A user-defined interface variable, with a location. */
444 interface_var v;
445 v.id = code[word+2];
446 v.type_id = code[word+1];
447 out[location] = v;
448 }
449 else {
450 /* A builtin interface variable */
451 interface_var v;
452 v.id = code[word+2];
453 v.type_id = code[word+1];
454 builtins_out[builtin] = v;
455 }
456 }
457
458 word += oplen;
459 }
460}
461
462
Chris Forbesaab9d112015-04-02 13:22:31 +1300463VK_LAYER_EXPORT VkResult VKAPI vkCreateShader(VkDevice device, const VkShaderCreateInfo *pCreateInfo,
464 VkShader *pShader)
465{
466 VkLayerDispatchTable* pTable = tableMap[(VkBaseLayerObject *)device];
467 VkResult res = pTable->CreateShader(device, pCreateInfo, pShader);
Chris Forbes4396ff52015-04-08 10:11:59 +1200468
469 shader_map[(VkBaseLayerObject *) *pShader] = new shader_source(pCreateInfo);
Chris Forbesaab9d112015-04-02 13:22:31 +1300470 return res;
471}
472
473
Chris Forbesbb164b62015-04-08 10:19:16 +1200474static void
475validate_interface_between_stages(shader_source const *producer, char const *producer_name,
476 shader_source const *consumer, char const *consumer_name)
477{
478 std::map<uint32_t, interface_var> outputs;
479 std::map<uint32_t, interface_var> inputs;
480
481 std::map<uint32_t, interface_var> builtin_outputs;
482 std::map<uint32_t, interface_var> builtin_inputs;
483
484 printf("Begin validate_interface_between_stages %s -> %s\n",
485 producer_name, consumer_name);
486
487 collect_interface_by_location(producer, spv::StorageOutput, outputs, builtin_outputs);
488 collect_interface_by_location(consumer, spv::StorageInput, inputs, builtin_inputs);
489
490 auto a_it = outputs.begin();
491 auto b_it = inputs.begin();
492
493 /* maps sorted by key (location); walk them together to find mismatches */
494 while (a_it != outputs.end() || b_it != inputs.end()) {
495 if (b_it == inputs.end() || a_it->first < b_it->first) {
496 printf(" WARN: %s writes to output location %d which is not consumed by %s\n",
497 producer_name, a_it->first, consumer_name);
498 a_it++;
499 }
500 else if (a_it == outputs.end() || a_it->first > b_it->first) {
501 printf(" ERR: %s consumes input location %d which is not written by %s\n",
502 consumer_name, b_it->first, producer_name);
503 b_it++;
504 }
505 else {
Chris Forbes1bb5a2e2015-04-10 11:41:20 +1200506 if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id)) {
507 printf(" OK: match on location %d\n", a_it->first);
508 }
509 else {
510 char producer_type[1024];
511 char consumer_type[1024];
512 describe_type(producer_type, producer, a_it->second.type_id);
513 describe_type(consumer_type, consumer, b_it->second.type_id);
514
515 printf(" ERR: type mismatch on location %d: '%s' vs '%s'\n", a_it->first,
516 producer_type, consumer_type);
517 }
Chris Forbesbb164b62015-04-08 10:19:16 +1200518 a_it++;
519 b_it++;
520 }
521 }
522
523 printf("End validate_interface_between_stages\n");
524}
525
526
/* Coarse classification of a VkFormat, for matching attachment formats
 * against what the shader declares. */
enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};
533
534
/* Classifies a VkFormat into a FORMAT_TYPE bucket. Anything that is not
 * UNDEFINED and not an explicit SINT/UINT format is treated as float-like
 * (UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB). */
static unsigned
get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FMT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FMT_R8_SINT:
    case VK_FMT_R8G8_SINT:
    case VK_FMT_R8G8B8_SINT:
    case VK_FMT_R8G8B8A8_SINT:
    case VK_FMT_R16_SINT:
    case VK_FMT_R16G16_SINT:
    case VK_FMT_R16G16B16_SINT:
    case VK_FMT_R16G16B16A16_SINT:
    case VK_FMT_R32_SINT:
    case VK_FMT_R32G32_SINT:
    case VK_FMT_R32G32B32_SINT:
    case VK_FMT_R32G32B32A32_SINT:
    case VK_FMT_B8G8R8_SINT:
    case VK_FMT_B8G8R8A8_SINT:
    case VK_FMT_R10G10B10A2_SINT:
    case VK_FMT_B10G10R10A2_SINT:
        return FORMAT_TYPE_SINT;
    case VK_FMT_R8_UINT:
    case VK_FMT_R8G8_UINT:
    case VK_FMT_R8G8B8_UINT:
    case VK_FMT_R8G8B8A8_UINT:
    case VK_FMT_R16_UINT:
    case VK_FMT_R16G16_UINT:
    case VK_FMT_R16G16B16_UINT:
    case VK_FMT_R16G16B16A16_UINT:
    case VK_FMT_R32_UINT:
    case VK_FMT_R32G32_UINT:
    case VK_FMT_R32G32B32_UINT:
    case VK_FMT_R32G32B32A32_UINT:
    case VK_FMT_B8G8R8_UINT:
    case VK_FMT_B8G8R8A8_UINT:
    case VK_FMT_R10G10B10A2_UINT:
    case VK_FMT_B10G10R10A2_UINT:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}
578
579
Chris Forbesfcd05f12015-04-08 10:36:37 +1200580static void
581validate_vi_against_vs_inputs(VkPipelineVertexInputCreateInfo const *vi, shader_source const *vs)
582{
583 std::map<uint32_t, interface_var> inputs;
584 /* we collect builtin inputs, but they will never appear in the VI state --
585 * the vs builtin inputs are generated in the pipeline, not sourced from buffers (VertexID, etc)
586 */
587 std::map<uint32_t, interface_var> builtin_inputs;
588
589 printf("Begin validate_vi_against_vs_inputs\n");
590
591 collect_interface_by_location(vs, spv::StorageInput, inputs, builtin_inputs);
592
593 /* Build index by location */
594 std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
595 for (int i = 0; i < vi->attributeCount; i++)
596 attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
597
598 auto it_a = attribs.begin();
599 auto it_b = inputs.begin();
600
601 while (it_a != attribs.end() || it_b != inputs.end()) {
602 if (it_b == inputs.end() || it_a->first < it_b->first) {
603 printf(" WARN: attribute at location %d not consumed by the vertex shader\n",
604 it_a->first);
605 it_a++;
606 }
607 else if (it_a == attribs.end() || it_b->first < it_a->first) {
608 printf(" ERR: vertex shader consumes input at location %d but not provided\n",
609 it_b->first);
610 it_b++;
611 }
612 else {
613 /* TODO: type check */
614 printf(" OK: match on attribute location %d\n",
615 it_a->first);
616 it_a++;
617 it_b++;
618 }
619 }
620
621 printf("End validate_vi_against_vs_inputs\n");
622}
623
624
Chris Forbes9b9f5fe2015-04-08 10:37:20 +1200625static void
626validate_fs_outputs_against_cb(shader_source const *fs, VkPipelineCbStateCreateInfo const *cb)
627{
628 std::map<uint32_t, interface_var> outputs;
629 std::map<uint32_t, interface_var> builtin_outputs;
630
631 printf("Begin validate_fs_outputs_against_cb\n");
632
633 /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
634
635 collect_interface_by_location(fs, spv::StorageOutput, outputs, builtin_outputs);
636
637 /* Check for legacy gl_FragColor broadcast: In this case, we should have no user-defined outputs,
638 * and all color attachment should be UNORM/SNORM/FLOAT.
639 */
640 if (builtin_outputs.find(spv::BuiltInFragColor) != builtin_outputs.end()) {
641 bool broadcast_err = false;
642 if (outputs.size()) {
643 printf(" ERR: should not have user-defined FS outputs when using broadcast\n");
644 broadcast_err = true;
645 }
646
647 for (int i = 0; i < cb->attachmentCount; i++) {
648 unsigned attachmentType = get_format_type(cb->pAttachments[i].format);
649 if (attachmentType == FORMAT_TYPE_SINT || attachmentType == FORMAT_TYPE_UINT) {
650 printf(" ERR: CB fomat should not be SINT or UINT when using broadcast\n");
651 broadcast_err = true;
652 }
653 }
654
655 if (!broadcast_err)
656 printf(" OK: FS broadcast to all color attachments\n");
657
658 /* Skip the usual matching -- all attachments are considered written to. */
659 printf("End validate_fs_outputs_against_cb\n");
660 return;
661 }
662
663 auto it = outputs.begin();
664 uint32_t attachment = 0;
665
666 /* Walk attachment list and outputs together -- this is a little overpowered since attachments
667 * are currently dense, but the parallel with matching between shader stages is nice.
668 */
669
670 while (it != outputs.end() || attachment < cb->attachmentCount) {
671 if (attachment == cb->attachmentCount || it->first < attachment) {
672 printf(" ERR: fragment shader writes to output location %d with no matching attachment\n",
673 it->first);
674 it++;
675 }
676 else if (it == outputs.end() || it->first > attachment) {
677 printf(" ERR: attachment %d not written by fragment shader\n",
678 attachment);
679 attachment++;
680 }
681 else {
682 printf(" OK: match on attachment index %d\n",
683 it->first);
684 /* TODO: typecheck */
685 it++;
686 attachment++;
687 }
688 }
689
690 printf("End validate_fs_outputs_against_cb\n");
691}
692
693
Chris Forbes60540932015-04-08 10:15:35 +1200694VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device,
695 const VkGraphicsPipelineCreateInfo *pCreateInfo,
696 VkPipeline *pPipeline)
697{
698 /* TODO: run cross-stage validation */
Chris Forbes60540932015-04-08 10:15:35 +1200699 /* - Support GS, TCS, TES stages */
700
Chris Forbes8f600932015-04-08 10:16:45 +1200701 /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
702 * before trying to do anything more: */
703
704 shader_source const *vs_source = 0;
705 shader_source const *fs_source = 0;
706 VkPipelineCbStateCreateInfo const *cb = 0;
707 VkPipelineVertexInputCreateInfo const *vi = 0;
708
709 for (auto stage = pCreateInfo; stage; stage = (decltype(stage))stage->pNext) {
710 if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
711 auto shader_stage = (VkPipelineShaderStageCreateInfo const *)stage;
712
713 if (shader_stage->shader.stage == VK_SHADER_STAGE_VERTEX)
714 vs_source = shader_map[(void *)(shader_stage->shader.shader)];
715 else if (shader_stage->shader.stage == VK_SHADER_STAGE_FRAGMENT)
716 fs_source = shader_map[(void *)(shader_stage->shader.shader)];
717 else
718 printf("Unknown shader stage %d\n", shader_stage->shader.stage);
719 }
720 else if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO) {
721 cb = (VkPipelineCbStateCreateInfo const *)stage;
722 }
723 else if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO) {
724 vi = (VkPipelineVertexInputCreateInfo const *)stage;
725 }
726 }
727
728 printf("Pipeline: vi=%p vs=%p fs=%p cb=%p\n", vi, vs_source, fs_source, cb);
729
Chris Forbesfcd05f12015-04-08 10:36:37 +1200730 if (vi && vs_source) {
731 validate_vi_against_vs_inputs(vi, vs_source);
732 }
733
Chris Forbesbb164b62015-04-08 10:19:16 +1200734 if (vs_source && fs_source) {
735 validate_interface_between_stages(vs_source, "vertex shader",
736 fs_source, "fragment shader");
737 }
738
Chris Forbes9b9f5fe2015-04-08 10:37:20 +1200739 if (fs_source && cb) {
740 validate_fs_outputs_against_cb(fs_source, cb);
741 }
742
Chris Forbes60540932015-04-08 10:15:35 +1200743 VkLayerDispatchTable *pTable = tableMap[(VkBaseLayerObject *)device];
744 VkResult res = pTable->CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
745 return res;
746}
747
748
Chris Forbesaab9d112015-04-02 13:22:31 +1300749VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
750{
751 if (gpu == NULL)
752 return NULL;
753
754 initLayerTable((const VkBaseLayerObject *) gpu);
755
Chris Forbes1b466bd2015-04-15 06:59:41 +1200756 loader_platform_thread_once(&g_initOnce, initLayer);
757
Chris Forbesaab9d112015-04-02 13:22:31 +1300758#define ADD_HOOK(fn) \
759 if (!strncmp(#fn, pName, sizeof(#fn))) \
760 return (void *) fn
761
762 ADD_HOOK(vkGetProcAddr);
763 ADD_HOOK(vkEnumerateLayers);
764 ADD_HOOK(vkCreateDevice);
765 ADD_HOOK(vkCreateShader);
Chris Forbes60540932015-04-08 10:15:35 +1200766 ADD_HOOK(vkCreateGraphicsPipeline);
Chris Forbesaab9d112015-04-02 13:22:31 +1300767
768 VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
769 if (gpuw->pGPA == NULL)
770 return NULL;
771 return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
772}