/*
 * Vulkan
 *
 * Copyright (C) 2015 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <map>
#include <unordered_map>
#include <vector>
#include "loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vkLayer.h"
// The following is #included again to catch certain OS-specific functions
// being used:
#include "loader_platform.h"

#include "SPIRV/spirv.h"

static std::unordered_map<void *, VkLayerDispatchTable *> tableMap;


static void
build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index)
{
    unsigned int const *code = (unsigned int const *)&words[0];
    size_t size = words.size();

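    /* A SPIRV module opens with a five-word header (magic, version, generator,
     * bound, schema), so instruction decoding starts at word 5. Each
     * instruction's first word packs its total length in the high 16 bits and
     * its opcode in the low 16 bits: a first word of 0x00020013, for example,
     * decodes as oplen=2, opcode=19. (Which operation a given opcode number
     * denotes depends on the bundled SPIRV/spirv.h revision.)
     */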
    unsigned word = 5;
    while (word < size) {
        unsigned opcode = code[word] & 0x0ffffu;
        unsigned oplen = (code[word] & 0xffff0000u) >> 16;

        /* a zero-length instruction can never advance; bail rather than spin
         * forever on malformed SPIRV */
        if (oplen == 0)
            break;

        switch (opcode) {
            case spv::OpTypeVoid:
            case spv::OpTypeBool:
            case spv::OpTypeInt:
            case spv::OpTypeFloat:
            case spv::OpTypeVector:
            case spv::OpTypeMatrix:
            case spv::OpTypeSampler:
            case spv::OpTypeFilter:
            case spv::OpTypeArray:
            case spv::OpTypeRuntimeArray:
            case spv::OpTypeStruct:
            case spv::OpTypeOpaque:
            case spv::OpTypePointer:
            case spv::OpTypeFunction:
            case spv::OpTypeEvent:
            case spv::OpTypeDeviceEvent:
            case spv::OpTypeReserveId:
            case spv::OpTypeQueue:
            case spv::OpTypePipe:
                type_def_index[code[word+1]] = word;
                break;

            default:
                /* We only care about type definitions */
                break;
        }

        word += oplen;
    }
}

struct shader_source {
    /* the spirv image itself */
    std::vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because
     * walking type trees requires jumping all over the instruction stream.
     */
    std::unordered_map<unsigned, unsigned> type_def_index;

    shader_source(VkShaderCreateInfo const *pCreateInfo) :
        words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)) {

        build_type_def_index(words, type_def_index);
    }
};


static std::unordered_map<void *, shader_source *> shader_map;


static VkLayerDispatchTable * initLayerTable(const VkBaseLayerObject *gpuw)
{
    VkLayerDispatchTable *pTable;

    assert(gpuw);
    std::unordered_map<void *, VkLayerDispatchTable *>::const_iterator it = tableMap.find((void *) gpuw->baseObject);
    if (it == tableMap.end())
    {
        pTable = new VkLayerDispatchTable;
        tableMap[(void *) gpuw->baseObject] = pTable;
    } else
    {
        return it->second;
    }

    layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);

    return pTable;
}


VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
    VkLayerDispatchTable* pTable = tableMap[gpu];
    VkResult result = pTable->CreateDevice(gpu, pCreateInfo, pDevice);
    // create a mapping for the device object into the dispatch table
    tableMap.emplace(*pDevice, pTable);
    return result;
}


VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
    /* only the first name slot is ever written, so only that pointer needs validating */
    if (pOutLayerCount == NULL || pOutLayers == NULL || pOutLayers[0] == NULL || pReserved == NULL)
        return VK_ERROR_INVALID_POINTER;

    if (maxLayerCount < 1)
        return VK_ERROR_INITIALIZATION_FAILED;
    *pOutLayerCount = 1;
    strncpy((char *) pOutLayers[0], "ShaderChecker", maxStringSize);
    return VK_SUCCESS;
}


struct extProps {
    uint32_t version;
    const char * const name;
};
#define SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE 1
static const struct extProps shaderCheckerExts[SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE] = {
    // TODO what is the version?
    {0x10, "ShaderChecker"},
};


VK_LAYER_EXPORT VkResult VKAPI vkGetGlobalExtensionInfo(
                                               VkExtensionInfoType infoType,
                                               uint32_t extensionIndex,
                                               size_t* pDataSize,
                                               void* pData)
{
    /* This entrypoint is NOT going to init its own dispatch table since loader calls here early */
    VkExtensionProperties *ext_props;
    uint32_t *count;

    if (pDataSize == NULL)
        return VK_ERROR_INVALID_POINTER;

    switch (infoType) {
        case VK_EXTENSION_INFO_TYPE_COUNT:
            *pDataSize = sizeof(uint32_t);
            if (pData == NULL)
                return VK_SUCCESS;
            count = (uint32_t *) pData;
            *count = SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE;
            break;
        case VK_EXTENSION_INFO_TYPE_PROPERTIES:
            *pDataSize = sizeof(VkExtensionProperties);
            if (pData == NULL)
                return VK_SUCCESS;
            if (extensionIndex >= SHADER_CHECKER_LAYER_EXT_ARRAY_SIZE)
                return VK_ERROR_INVALID_VALUE;
            ext_props = (VkExtensionProperties *) pData;
            ext_props->version = shaderCheckerExts[extensionIndex].version;
            strncpy(ext_props->extName, shaderCheckerExts[extensionIndex].name,
                    VK_MAX_EXTENSION_NAME);
            ext_props->extName[VK_MAX_EXTENSION_NAME - 1] = '\0';
            break;
        default:
            return VK_ERROR_INVALID_VALUE;
    }

    return VK_SUCCESS;
}

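/* Callers are expected to use the two-call idiom: query the count first, then
 * fetch each property record by index. A minimal sketch of a hypothetical
 * caller (not part of this layer):
 *
 *     uint32_t count;
 *     size_t size = sizeof(count);
 *     vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_COUNT, 0, &size, &count);
 *
 *     VkExtensionProperties props;
 *     size = sizeof(props);
 *     for (uint32_t i = 0; i < count; i++)
 *         vkGetGlobalExtensionInfo(VK_EXTENSION_INFO_TYPE_PROPERTIES, i, &size, &props);
 */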

static char const *
storage_class_name(unsigned sc)
{
    switch (sc) {
        case spv::StorageInput: return "input";
        case spv::StorageOutput: return "output";
        case spv::StorageConstantUniform: return "const uniform";
        case spv::StorageUniform: return "uniform";
        case spv::StorageWorkgroupLocal: return "workgroup local";
        case spv::StorageWorkgroupGlobal: return "workgroup global";
        case spv::StoragePrivateGlobal: return "private global";
        case spv::StorageFunction: return "function";
        case spv::StorageGeneric: return "generic";
        case spv::StoragePrivate: return "private";
        case spv::StorageAtomicCounter: return "atomic counter";
        default: return "unknown";
    }
}


/* returns ptr to null terminator */
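/* e.g. a pointer to a four-component float vector in the input storage class
 * renders as: "ptr to input vec4 of float32" */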
static char *
describe_type(char *dst, shader_source const *src, unsigned type)
{
    auto type_def_it = src->type_def_index.find(type);

    if (type_def_it == src->type_def_index.end()) {
        return dst + sprintf(dst, "undef");
    }

    unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
    unsigned opcode = code[0] & 0x0ffffu;
    switch (opcode) {
        case spv::OpTypeBool:
            return dst + sprintf(dst, "bool");
        case spv::OpTypeInt:
            return dst + sprintf(dst, "%cint%d", code[3] ? 's' : 'u', code[2]);
        case spv::OpTypeFloat:
            return dst + sprintf(dst, "float%d", code[2]);
        case spv::OpTypeVector:
            dst += sprintf(dst, "vec%d of ", code[3]);
            return describe_type(dst, src, code[2]);
        case spv::OpTypeMatrix:
            dst += sprintf(dst, "mat%d of ", code[3]);
            return describe_type(dst, src, code[2]);
        case spv::OpTypeArray:
            dst += sprintf(dst, "arr[%d] of ", code[3]);
            return describe_type(dst, src, code[2]);
        case spv::OpTypePointer:
            dst += sprintf(dst, "ptr to %s ", storage_class_name(code[2]));
            return describe_type(dst, src, code[3]);
        case spv::OpTypeStruct:
            {
                unsigned oplen = code[0] >> 16;
                dst += sprintf(dst, "struct of (");
                for (unsigned i = 2; i < oplen; i++) {
                    dst = describe_type(dst, src, code[i]);
                    dst += sprintf(dst, i == oplen-1 ? ")" : ", ");
                }
                return dst;
            }
        default:
            return dst + sprintf(dst, "oddtype");
    }
}


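/* Structural type matching: the producer and consumer number their <id>s
 * independently, so two types match when their definitions decode to the same
 * tree -- e.g. "vec4 of float32" written by the VS matches "vec4 of float32"
 * consumed by the FS, regardless of the ids each module assigned.
 */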
static bool
types_match(shader_source const *a, shader_source const *b, unsigned a_type, unsigned b_type)
{
    auto a_type_def_it = a->type_def_index.find(a_type);
    auto b_type_def_it = b->type_def_index.find(b_type);

    if (a_type_def_it == a->type_def_index.end()) {
        printf("ERR: can't find def for type %d in producing shader %p; SPIRV probably invalid.\n",
               a_type, a);
        return false;
    }

    if (b_type_def_it == b->type_def_index.end()) {
        printf("ERR: can't find def for type %d in consuming shader %p; SPIRV probably invalid.\n",
               b_type, b);
        return false;
    }

    /* walk two type trees together, and complain about differences */
    unsigned int const *a_code = (unsigned int const *)&a->words[a_type_def_it->second];
    unsigned int const *b_code = (unsigned int const *)&b->words[b_type_def_it->second];

    unsigned a_opcode = a_code[0] & 0x0ffffu;
    unsigned b_opcode = b_code[0] & 0x0ffffu;

    if (a_opcode != b_opcode) {
        printf("  - FAIL: type def opcodes differ: %d vs %d\n", a_opcode, b_opcode);
        return false;
    }

    switch (a_opcode) {
        case spv::OpTypeBool:
            return true;
        case spv::OpTypeInt:
            /* match on width, signedness */
            return a_code[2] == b_code[2] && a_code[3] == b_code[3];
        case spv::OpTypeFloat:
            /* match on width */
            return a_code[2] == b_code[2];
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeArray:
            /* match on element type, count. these all have the same layout */
            return types_match(a, b, a_code[2], b_code[2]) && a_code[3] == b_code[3];
        case spv::OpTypeStruct:
            /* match on all element types */
            {
                unsigned a_len = a_code[0] >> 16;
                unsigned b_len = b_code[0] >> 16;

                if (a_len != b_len) {
                    return false;   /* structs cannot match if member counts differ */
                }

                for (unsigned i = 2; i < a_len; i++) {
                    if (!types_match(a, b, a_code[i], b_code[i])) {
                        return false;
                    }
                }

                return true;
            }
        case spv::OpTypePointer:
            /* match on pointee type. storage class is expected to differ */
            return types_match(a, b, a_code[3], b_code[3]);

        default:
            /* remaining types are CLisms, or may not appear in the interfaces we
             * are interested in. Just claim no match.
             */
            return false;
    }
}


static int
value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def)
{
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}


struct interface_var {
    uint32_t id;
    uint32_t type_id;
    /* TODO: collect the name, too? Isn't required to be present. */
};


static void
collect_interface_by_location(shader_source const *src, spv::StorageClass interface,
                              std::map<uint32_t, interface_var> &out,
                              std::map<uint32_t, interface_var> &builtins_out)
{
    unsigned int const *code = (unsigned int const *)&src->words[0];
    size_t size = src->words.size();

    if (code[0] != spv::MagicNumber) {
        printf("Invalid magic.\n");
        return;
    }

    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;

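    /* SPIRV's module layout places decorations ahead of the types and variables
     * they decorate, so a single forward pass can record Location / BuiltIn
     * decorations before the OpVariables that reference them. Illustrative
     * (pseudo-assembly) example:
     *
     *     OpDecorate %5 Location 1     ; var_locations[5] = 1
     *     ...
     *     %5 = OpVariable %ptr Input   ; location looked up when the var appears
     */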
    unsigned word = 5;
    while (word < size) {

        unsigned opcode = code[word] & 0x0ffffu;
        unsigned oplen = (code[word] & 0xffff0000u) >> 16;

        /* a zero-length instruction can never advance; bail on malformed SPIRV */
        if (oplen == 0)
            break;

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (opcode == spv::OpDecorate) {
            if (code[word+2] == spv::DecLocation) {
                var_locations[code[word+1]] = code[word+3];
            }

            if (code[word+2] == spv::DecBuiltIn) {
                var_builtins[code[word+1]] = code[word+3];
            }
        }

        /* TODO: handle grouped decorations */
        /* TODO: handle index=1 dual source outputs from FS -- two vars will
         * have the same location, and we DONT want to clobber. */

        if (opcode == spv::OpVariable && code[word+3] == interface) {
            int location = value_or_default(var_locations, code[word+2], -1);
            int builtin = value_or_default(var_builtins, code[word+2], -1);

            if (location == -1 && builtin == -1) {
                /* No location defined, and not bound to an API builtin.
                 * The spec says nothing about how this case works (or doesn't)
                 * for interface matching.
                 */
                printf("WARN: var %d (type %d) in %s interface has no Location or Builtin decoration\n",
                       code[word+2], code[word+1], interface == spv::StorageInput ? "input" : "output");
            }
            else if (location != -1) {
                /* A user-defined interface variable, with a location. */
                interface_var v;
                v.id = code[word+2];
                v.type_id = code[word+1];
                out[location] = v;
            }
            else {
                /* A builtin interface variable */
                interface_var v;
                v.id = code[word+2];
                v.type_id = code[word+1];
                builtins_out[builtin] = v;
            }
        }

        word += oplen;
    }
}


VK_LAYER_EXPORT VkResult VKAPI vkCreateShader(VkDevice device, const VkShaderCreateInfo *pCreateInfo,
                                              VkShader *pShader)
{
    VkLayerDispatchTable* pTable = tableMap[(VkBaseLayerObject *)device];
    VkResult res = pTable->CreateShader(device, pCreateInfo, pShader);

    shader_map[(VkBaseLayerObject *) *pShader] = new shader_source(pCreateInfo);
    return res;
}


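/* Example of the location walk below: if the producer writes locations {0,1,3}
 * and the consumer reads {1,2,3}, then 0 warns (written but never read),
 * 2 errors (consumed but never written), and 1 and 3 are type-checked.
 */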
static void
validate_interface_between_stages(shader_source const *producer, char const *producer_name,
                                  shader_source const *consumer, char const *consumer_name)
{
    std::map<uint32_t, interface_var> outputs;
    std::map<uint32_t, interface_var> inputs;

    std::map<uint32_t, interface_var> builtin_outputs;
    std::map<uint32_t, interface_var> builtin_inputs;

    printf("Begin validate_interface_between_stages %s -> %s\n",
           producer_name, consumer_name);

    collect_interface_by_location(producer, spv::StorageOutput, outputs, builtin_outputs);
    collect_interface_by_location(consumer, spv::StorageInput, inputs, builtin_inputs);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while (a_it != outputs.end() || b_it != inputs.end()) {
        /* a_it must be checked against end() before it is dereferenced */
        if (b_it == inputs.end() || (a_it != outputs.end() && a_it->first < b_it->first)) {
            printf("  WARN: %s writes to output location %d which is not consumed by %s\n",
                   producer_name, a_it->first, consumer_name);
            a_it++;
        }
        else if (a_it == outputs.end() || a_it->first > b_it->first) {
            printf("  ERR: %s consumes input location %d which is not written by %s\n",
                   consumer_name, b_it->first, producer_name);
            b_it++;
        }
        else {
            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id)) {
                printf("  OK: match on location %d\n", a_it->first);
            }
            else {
                char producer_type[1024];
                char consumer_type[1024];
                describe_type(producer_type, producer, a_it->second.type_id);
                describe_type(consumer_type, consumer, b_it->second.type_id);

                printf("  ERR: type mismatch on location %d: '%s' vs '%s'\n", a_it->first,
                       producer_type, consumer_type);
            }
            a_it++;
            b_it++;
        }
    }

    printf("End validate_interface_between_stages\n");
}


enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};


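/* e.g. VK_FMT_R8G8B8A8_UNORM (assuming that spelling in this header revision)
 * appears in neither integer list below, so it falls through to the default
 * case and is treated as FORMAT_TYPE_FLOAT -- matching how a UNORM source
 * appears to the shader. */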
static unsigned
get_format_type(VkFormat fmt) {
    switch (fmt) {
        case VK_FMT_UNDEFINED:
            return FORMAT_TYPE_UNDEFINED;
        case VK_FMT_R8_SINT:
        case VK_FMT_R8G8_SINT:
        case VK_FMT_R8G8B8_SINT:
        case VK_FMT_R8G8B8A8_SINT:
        case VK_FMT_R16_SINT:
        case VK_FMT_R16G16_SINT:
        case VK_FMT_R16G16B16_SINT:
        case VK_FMT_R16G16B16A16_SINT:
        case VK_FMT_R32_SINT:
        case VK_FMT_R32G32_SINT:
        case VK_FMT_R32G32B32_SINT:
        case VK_FMT_R32G32B32A32_SINT:
        case VK_FMT_B8G8R8_SINT:
        case VK_FMT_B8G8R8A8_SINT:
        case VK_FMT_R10G10B10A2_SINT:
        case VK_FMT_B10G10R10A2_SINT:
            return FORMAT_TYPE_SINT;
        case VK_FMT_R8_UINT:
        case VK_FMT_R8G8_UINT:
        case VK_FMT_R8G8B8_UINT:
        case VK_FMT_R8G8B8A8_UINT:
        case VK_FMT_R16_UINT:
        case VK_FMT_R16G16_UINT:
        case VK_FMT_R16G16B16_UINT:
        case VK_FMT_R16G16B16A16_UINT:
        case VK_FMT_R32_UINT:
        case VK_FMT_R32G32_UINT:
        case VK_FMT_R32G32B32_UINT:
        case VK_FMT_R32G32B32A32_UINT:
        case VK_FMT_B8G8R8_UINT:
        case VK_FMT_B8G8R8A8_UINT:
        case VK_FMT_R10G10B10A2_UINT:
        case VK_FMT_B10G10R10A2_UINT:
            return FORMAT_TYPE_UINT;
        default:
            return FORMAT_TYPE_FLOAT;
    }
}


static void
validate_vi_against_vs_inputs(VkPipelineVertexInputCreateInfo const *vi, shader_source const *vs)
{
    std::map<uint32_t, interface_var> inputs;
    /* we collect builtin inputs, but they will never appear in the VI state --
     * the vs builtin inputs are generated in the pipeline, not sourced from buffers (VertexID, etc)
     */
    std::map<uint32_t, interface_var> builtin_inputs;

    printf("Begin validate_vi_against_vs_inputs\n");

    collect_interface_by_location(vs, spv::StorageInput, inputs, builtin_inputs);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    for (uint32_t i = 0; i < vi->attributeCount; i++)
        attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();

    while (it_a != attribs.end() || it_b != inputs.end()) {
        /* it_a must be checked against end() before it is dereferenced */
        if (it_b == inputs.end() || (it_a != attribs.end() && it_a->first < it_b->first)) {
            printf("  WARN: attribute at location %d not consumed by the vertex shader\n",
                   it_a->first);
            it_a++;
        }
        else if (it_a == attribs.end() || it_b->first < it_a->first) {
            printf("  ERR: vertex shader consumes input at location %d which is not provided\n",
                   it_b->first);
            it_b++;
        }
        else {
            /* TODO: type check */
            printf("  OK: match on attribute location %d\n",
                   it_a->first);
            it_a++;
            it_b++;
        }
    }

    printf("End validate_vi_against_vs_inputs\n");
}


static void
validate_fs_outputs_against_cb(shader_source const *fs, VkPipelineCbStateCreateInfo const *cb)
{
    std::map<uint32_t, interface_var> outputs;
    std::map<uint32_t, interface_var> builtin_outputs;

    printf("Begin validate_fs_outputs_against_cb\n");

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    collect_interface_by_location(fs, spv::StorageOutput, outputs, builtin_outputs);

    /* Check for legacy gl_FragColor broadcast: In this case, we should have no user-defined outputs,
     * and all color attachments should be UNORM/SNORM/FLOAT.
     */
    if (builtin_outputs.find(spv::BuiltInFragColor) != builtin_outputs.end()) {
        bool broadcast_err = false;
        if (outputs.size()) {
            printf("  ERR: should not have user-defined FS outputs when using broadcast\n");
            broadcast_err = true;
        }

        for (uint32_t i = 0; i < cb->attachmentCount; i++) {
            unsigned attachmentType = get_format_type(cb->pAttachments[i].format);
            if (attachmentType == FORMAT_TYPE_SINT || attachmentType == FORMAT_TYPE_UINT) {
                printf("  ERR: CB format should not be SINT or UINT when using broadcast\n");
                broadcast_err = true;
            }
        }

        if (!broadcast_err)
            printf("  OK: FS broadcast to all color attachments\n");

        /* Skip the usual matching -- all attachments are considered written to. */
        printf("End validate_fs_outputs_against_cb\n");
        return;
    }

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while (it != outputs.end() || attachment < cb->attachmentCount) {
        /* it must be checked against end() before it is dereferenced */
        if (attachment == cb->attachmentCount || (it != outputs.end() && it->first < attachment)) {
            printf("  ERR: fragment shader writes to output location %d with no matching attachment\n",
                   it->first);
            it++;
        }
        else if (it == outputs.end() || it->first > attachment) {
            printf("  ERR: attachment %d not written by fragment shader\n",
                   attachment);
            attachment++;
        }
        else {
            printf("  OK: match on attachment index %d\n",
                   it->first);
            /* TODO: typecheck */
            it++;
            attachment++;
        }
    }

    printf("End validate_fs_outputs_against_cb\n");
}


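/* Pipeline-creation hook: walks the create info's pNext chain to find the
 * shader stages and the fixed-function state this layer understands, runs the
 * interface checks above, then forwards the call down the layer chain.
 */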
VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device,
                                                        const VkGraphicsPipelineCreateInfo *pCreateInfo,
                                                        VkPipeline *pPipeline)
{
    /* TODO: run cross-stage validation */
    /* - Support GS, TCS, TES stages */

    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
     * before trying to do anything more: */

    shader_source const *vs_source = 0;
    shader_source const *fs_source = 0;
    VkPipelineCbStateCreateInfo const *cb = 0;
    VkPipelineVertexInputCreateInfo const *vi = 0;

    for (auto stage = pCreateInfo; stage; stage = (decltype(stage))stage->pNext) {
        if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
            auto shader_stage = (VkPipelineShaderStageCreateInfo const *)stage;

            if (shader_stage->shader.stage == VK_SHADER_STAGE_VERTEX)
                vs_source = shader_map[(void *)(shader_stage->shader.shader)];
            else if (shader_stage->shader.stage == VK_SHADER_STAGE_FRAGMENT)
                fs_source = shader_map[(void *)(shader_stage->shader.shader)];
            else
                printf("Unknown shader stage %d\n", shader_stage->shader.stage);
        }
        else if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO) {
            cb = (VkPipelineCbStateCreateInfo const *)stage;
        }
        else if (stage->sType == VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO) {
            vi = (VkPipelineVertexInputCreateInfo const *)stage;
        }
    }

    printf("Pipeline: vi=%p vs=%p fs=%p cb=%p\n", vi, vs_source, fs_source, cb);

    if (vi && vs_source) {
        validate_vi_against_vs_inputs(vi, vs_source);
    }

    if (vs_source && fs_source) {
        validate_interface_between_stages(vs_source, "vertex shader",
                                          fs_source, "fragment shader");
    }

    if (fs_source && cb) {
        validate_fs_outputs_against_cb(fs_source, cb);
    }

    VkLayerDispatchTable *pTable = tableMap[(VkBaseLayerObject *)device];
    VkResult res = pTable->CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
    return res;
}


VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
{
    if (gpu == NULL)
        return NULL;

    initLayerTable((const VkBaseLayerObject *) gpu);

#define ADD_HOOK(fn)    \
    if (!strncmp(#fn, pName, sizeof(#fn))) \
        return (void *) fn

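    /* e.g. ADD_HOOK(vkCreateShader) expands to:
     *     if (!strncmp("vkCreateShader", pName, sizeof("vkCreateShader")))
     *         return (void *) vkCreateShader;
     * sizeof(#fn) counts the terminating NUL, so the comparison requires the
     * full name to match, not just a prefix.
     */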
    ADD_HOOK(vkGetProcAddr);
    ADD_HOOK(vkEnumerateLayers);
    ADD_HOOK(vkCreateDevice);
    ADD_HOOK(vkCreateShader);
    ADD_HOOK(vkCreateGraphicsPipeline);

    VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
    if (gpuw->pGPA == NULL)
        return NULL;
    return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
}