/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"

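/* Parse an integer environment variable. strtol() with base 0 accepts
 * decimal, hex ("0x...") and octal, so e.g. INTEL_DEVID_OVERRIDE can be
 * given in either form. Returns 0 when the variable is unset.
 */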
static int
anv_env_get_int(const char *name)
{
   const char *val = getenv(name);

   if (!val)
      return 0;

   return strtol(val, NULL, 0);
}

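/* Probe the DRM render node at `path` and fill in the physical device.
 * Besides querying the chipset id, this checks that the kernel supports
 * each feature the driver relies on (wait-timeout, execbuf2, LLC and exec
 * constants) and reports VK_ERROR_UNAVAILABLE otherwise.
 */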
static VkResult
fill_physical_device(struct anv_physical_device *device,
                     struct anv_instance *instance,
                     const char *path)
{
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_UNAVAILABLE);

   device->instance = instance;
   device->path = path;

   device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
   device->no_hw = false;
   if (device->chipset_id) {
      /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
      device->no_hw = true;
   } else {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   }
   if (!device->chipset_id)
      goto fail;

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info)
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
      goto fail;

   close(fd);

   return VK_SUCCESS;

 fail:
   close(fd);

   return vk_error(VK_ERROR_UNAVAILABLE);
}

static void *default_alloc(
    void* pUserData,
    size_t size,
    size_t alignment,
    VkSystemAllocType allocType)
{
   /* malloc() only guarantees fundamental alignment, which covers the
    * small (8 byte) alignments the driver requests of the default
    * allocator.
    */
   return malloc(size);
}

static void default_free(
    void* pUserData,
    void* pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};

VkResult VKAPI vkCreateInstance(
    const VkInstanceCreateInfo* pCreateInfo,
    VkInstance* pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;

   instance->physicalDeviceCount = 0;
   result = fill_physical_device(&instance->physicalDevice,
                                 instance, "/dev/dri/renderD128");
   if (result == VK_SUCCESS)
      instance->physicalDeviceCount++;

   *pInstance = (VkInstance) instance;

   return VK_SUCCESS;
}

VkResult VKAPI vkDestroyInstance(
    VkInstance _instance)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   instance->pfnFree(instance->pAllocUserData, instance);

   return VK_SUCCESS;
}

VkResult VKAPI vkEnumeratePhysicalDevices(
    VkInstance _instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   if (pPhysicalDevices != NULL && *pPhysicalDeviceCount >= 1)
      pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
   *pPhysicalDeviceCount = instance->physicalDeviceCount;

   return VK_SUCCESS;
}

VkResult VKAPI vkGetPhysicalDeviceInfo(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceInfoType infoType,
    size_t* pDataSize,
    void* pData)
{
   struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
   VkPhysicalDeviceProperties *properties;
   VkPhysicalDevicePerformance *performance;
   VkPhysicalDeviceQueueProperties *queue_properties;
   VkPhysicalDeviceMemoryProperties *memory_properties;
   uint64_t ns_per_tick = 80;
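   /* 80 ns per tick corresponds to a 12.5 MHz timestamp frequency
    * (1e9 / 80), which is what the PROPERTIES query below reports.
    */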

   switch (infoType) {
   case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
      properties = pData;
      assert(*pDataSize >= sizeof(*properties));
      *pDataSize = sizeof(*properties); /* Assuming we have to return the size of our struct. */

      properties->apiVersion = 1;
      properties->driverVersion = 1;
      properties->vendorId = 0x8086;
      properties->deviceId = device->chipset_id;
      properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
      strcpy(properties->deviceName, device->name);
      properties->maxInlineMemoryUpdateSize = 0;
      properties->maxBoundDescriptorSets = 0;
      properties->maxThreadGroupSize = 0;
      properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
      properties->multiColorAttachmentClears = 0;
      properties->maxDescriptorSets = 2;
      properties->maxViewports = 16;
      properties->maxColorAttachments = 8;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
      performance = pData;
      assert(*pDataSize >= sizeof(*performance));
      *pDataSize = sizeof(*performance); /* Assuming we have to return the size of our struct. */

      performance->maxDeviceClock = 1.0;
      performance->aluPerClock = 1.0;
      performance->texPerClock = 1.0;
      performance->primsPerClock = 1.0;
      performance->pixelsPerClock = 1.0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
      queue_properties = pData;
      assert(*pDataSize >= sizeof(*queue_properties));
      *pDataSize = sizeof(*queue_properties);

      queue_properties->queueFlags = 0;
      queue_properties->queueCount = 1;
      queue_properties->maxAtomicCounters = 0;
      queue_properties->supportsTimestamps = 0;
      queue_properties->maxMemReferences = 0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
      memory_properties = pData;
      assert(*pDataSize >= sizeof(*memory_properties));
      *pDataSize = sizeof(*memory_properties);

      memory_properties->supportsMigration = false;
      memory_properties->supportsPinning = false;
      return VK_SUCCESS;

   default:
      return VK_UNSUPPORTED;
   }
}

void * VKAPI vkGetProcAddr(
    VkPhysicalDevice physicalDevice,
    const char* pName)
{
   return NULL;
}

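/* INTEL_DEBUG is a comma-separated list of flags; only "aub" (dump an AUB
 * trace of each submitted command buffer) and "no_hw" (skip actual
 * execbuffer submission) are recognized here.
 */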
static void
parse_debug_flags(struct anv_device *device)
{
   const char *debug, *p, *end;

   debug = getenv("INTEL_DEBUG");
   device->dump_aub = false;
   if (debug) {
      for (p = debug; *p; p = end + 1) {
         end = strchrnul(p, ',');
         if (end - p == 3 && memcmp(p, "aub", 3) == 0)
            device->dump_aub = true;
         if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
            device->no_hw = true;
         if (*end == '\0')
            break;
      }
   }
}

VkResult VKAPI vkCreateDevice(
    VkPhysicalDevice _physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    VkDevice* pDevice)
{
   struct anv_physical_device *physicalDevice =
      (struct anv_physical_device *) _physicalDevice;
   struct anv_instance *instance = physicalDevice->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   device = instance->pfnAlloc(instance->pAllocUserData,
                               sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->no_hw = physicalDevice->no_hw;
   parse_debug_flags(device);

   device->instance = physicalDevice->instance;
   device->fd = open(physicalDevice->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   anv_block_pool_init(&device->dyn_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dyn_state_pool,
                       &device->dyn_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 2048);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   device->compiler = anv_compiler_create(device->fd);
   device->aub_writer = NULL;

   device->info = *physicalDevice->info;

   pthread_mutex_init(&device->mutex, NULL);

   anv_device_init_meta(device);

   *pDevice = (VkDevice) device;

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}

VkResult VKAPI vkDestroyDevice(
    VkDevice _device)
{
   struct anv_device *device = (struct anv_device *) _device;

   anv_compiler_destroy(device->compiler);

   anv_block_pool_finish(&device->dyn_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);

   close(device->fd);

   if (device->aub_writer)
      anv_aub_writer_destroy(device->aub_writer);

   anv_device_free(device, device);

   return VK_SUCCESS;
}

VkResult VKAPI vkGetGlobalExtensionInfo(
    VkExtensionInfoType infoType,
    uint32_t extensionIndex,
    size_t* pDataSize,
    void* pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkGetPhysicalDeviceExtensionInfo(
    VkPhysicalDevice physicalDevice,
    VkExtensionInfoType infoType,
    uint32_t extensionIndex,
    size_t* pDataSize,
    void* pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == 4);
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkEnumerateLayers(
    VkPhysicalDevice physicalDevice,
    size_t maxStringSize,
    size_t* pLayerCount,
    char* const* pOutLayers,
    void* pReserved)
{
   *pLayerCount = 0;

   return VK_SUCCESS;
}

VkResult VKAPI vkGetDeviceQueue(
    VkDevice _device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue* pQueue)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_queue *queue;

   /* FIXME: Should allocate these at device create time. */

   queue = anv_device_alloc(device, sizeof(*queue), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (queue == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   *pQueue = (VkQueue) queue;

   return VK_SUCCESS;
}

static const uint32_t BATCH_SIZE = 8192;

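/* Allocate and map the backing bo for a batch. Commands are appended at
 * batch->next, and relocations are collected in cmd_relocs/surf_relocs
 * until the batch is submitted.
 */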
VkResult
anv_batch_init(struct anv_batch *batch, struct anv_device *device)
{
   VkResult result;

   result = anv_bo_init_new(&batch->bo, device, BATCH_SIZE);
   if (result != VK_SUCCESS)
      return result;

   batch->bo.map =
      anv_gem_mmap(device, batch->bo.gem_handle, 0, BATCH_SIZE);
   if (batch->bo.map == NULL) {
      anv_gem_close(device, batch->bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
   batch->next = batch->bo.map;

   return VK_SUCCESS;
}

void
anv_batch_finish(struct anv_batch *batch, struct anv_device *device)
{
   anv_gem_munmap(batch->bo.map, BATCH_SIZE);
   anv_gem_close(device, batch->bo.gem_handle);
}

void
anv_batch_reset(struct anv_batch *batch)
{
   batch->next = batch->bo.map;
   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
}

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   void *p = batch->next;

   batch->next += num_dwords * 4;

   return p;
}

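/* Concatenate `other` onto `list`, biasing each copied relocation's batch
 * offset by `offset`, the position at which the other batch was copied
 * into the destination batch.
 */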
static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      struct anv_reloc_list *other, uint32_t offset)
{
   uint32_t i, count;

   count = list->num_relocs;
   memcpy(&list->relocs[count], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[count], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));
   for (i = 0; i < other->num_relocs; i++)
      list->relocs[i + count].offset += offset;

   list->num_relocs += other->num_relocs;
}

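/* Record a relocation for the dword at `offset` in the batch, pointing at
 * `target_bo` plus `delta`. Returns the presumed address (bo offset +
 * delta) so the caller can write it into the batch now; if the bo doesn't
 * move, the kernel can skip the fixup at execbuf time.
 */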
static uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   uint32_t offset,
                   struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   assert(list->num_relocs < ANV_BATCH_MAX_RELOCS);

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->bo.map;
   memcpy(batch->next, other->bo.map, size);

   offset = batch->next - batch->bo.map;
   anv_reloc_list_append(&batch->cmd_relocs, &other->cmd_relocs, offset);
   anv_reloc_list_append(&batch->surf_relocs, &other->surf_relocs, offset);

   batch->next += size;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(&batch->cmd_relocs,
                             location - batch->bo.map, bo, delta);
}

VkResult VKAPI vkQueueSubmit(
    VkQueue _queue,
    uint32_t cmdBufferCount,
    const VkCmdBuffer* pCmdBuffers,
    VkFence fence)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;
   struct anv_device *device = queue->device;
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) pCmdBuffers[0];
   int ret;

   assert(cmdBufferCount == 1);

   if (device->dump_aub)
      anv_cmd_buffer_dump(cmd_buffer);

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
      if (ret != 0)
         return vk_error(VK_ERROR_UNKNOWN);

      for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
         cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
   } else {
      *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
   }

   return VK_SUCCESS;
}

VkResult VKAPI vkQueueAddMemReferences(
    VkQueue queue,
    uint32_t count,
    const VkDeviceMemory* pMems)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkQueueRemoveMemReferences(
    VkQueue queue,
    uint32_t count,
    const VkDeviceMemory* pMems)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkQueueWaitIdle(
    VkQueue _queue)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;

   return vkDeviceWaitIdle((VkDevice) queue->device);
}

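/* The kernel has no "wait for idle" ioctl, so submit a minimal batch
 * (just MI_BATCH_BUFFER_END) through this device's context and block on
 * its bo with an effectively infinite timeout.
 */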
VkResult VKAPI vkDeviceWaitIdle(
    VkDevice _device)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dyn_state_pool, 32, 32);
   bo = &device->dyn_state_pool.block_pool->bo;
   batch.next = state.map;
   anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN8_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &execbuf);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }

      timeout = INT64_MAX;
      ret = anv_gem_wait(device, bo->gem_handle, &timeout);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }
   }

   anv_state_pool_free(&device->dyn_state_pool, state);

   return VK_SUCCESS;

 fail:
   anv_state_pool_free(&device->dyn_state_pool, state);

   return result;
}

void *
anv_device_alloc(struct anv_device *device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType)
{
   return device->instance->pfnAlloc(device->instance->pAllocUserData,
                                     size,
                                     alignment,
                                     allocType);
}

void
anv_device_free(struct anv_device *device,
                void *mem)
{
   device->instance->pfnFree(device->instance->pAllocUserData, mem);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}

VkResult VKAPI vkAllocMemory(
    VkDevice _device,
    const VkMemoryAllocInfo* pAllocInfo,
    VkDeviceMemory* pMem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = (VkDeviceMemory) mem;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, mem);

   return result;
}

VkResult VKAPI vkFreeMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);

   return VK_SUCCESS;
}

VkResult VKAPI vkSetMemoryPriority(
    VkDevice device,
    VkDeviceMemory mem,
    VkMemoryPriority priority)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkMapMemory(
    VkDevice _device,
    VkDeviceMemory _mem,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkMemoryMapFlags flags,
    void** ppData)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the
    * memory at a time is valid. We could just mmap up front and return an
    * offset pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}

VkResult VKAPI vkUnmapMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   anv_gem_munmap(mem->map, mem->map_size);

   return VK_SUCCESS;
}

VkResult VKAPI vkFlushMappedMemory(
    VkDevice device,
    VkDeviceMemory mem,
    VkDeviceSize offset,
    VkDeviceSize size)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult VKAPI vkPinSystemMemory(
    VkDevice device,
    const void* pSysMem,
    size_t memSize,
    VkDeviceMemory* pMem)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkGetMultiDeviceCompatibility(
    VkPhysicalDevice physicalDevice0,
    VkPhysicalDevice physicalDevice1,
    VkPhysicalDeviceCompatibilityInfo* pInfo)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedMemory(
    VkDevice device,
    const VkMemoryOpenInfo* pOpenInfo,
    VkDeviceMemory* pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedSemaphore(
    VkDevice device,
    const VkSemaphoreOpenInfo* pOpenInfo,
    VkSemaphore* pSemaphore)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerMemory(
    VkDevice device,
    const VkPeerMemoryOpenInfo* pOpenInfo,
    VkDeviceMemory* pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerImage(
    VkDevice device,
    const VkPeerImageOpenInfo* pOpenInfo,
    VkImage* pImage,
    VkDeviceMemory* pMem)
{
   return VK_UNSUPPORTED;
}

static VkResult
anv_instance_destructor(struct anv_device *device,
                        VkObject object)
{
   return vkDestroyInstance(object);
}

static VkResult
anv_noop_destructor(struct anv_device *device,
                    VkObject object)
{
   return VK_SUCCESS;
}

static VkResult
anv_device_destructor(struct anv_device *device,
                      VkObject object)
{
   return vkDestroyDevice(object);
}

static VkResult
anv_cmd_buffer_destructor(struct anv_device *device,
                          VkObject object)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_batch_finish(&cmd_buffer->batch, device);
   anv_device_free(device, cmd_buffer->exec2_objects);
   anv_device_free(device, cmd_buffer->exec2_bos);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_destructor(struct anv_device *device,
                        VkObject object)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline *) object;

   return anv_pipeline_destroy(pipeline);
}

static VkResult
anv_free_destructor(struct anv_device *device,
                    VkObject object)
{
   anv_device_free(device, (void *) object);

   return VK_SUCCESS;
}

static VkResult (*anv_object_destructors[])(struct anv_device *device,
                                            VkObject object) = {
   [VK_OBJECT_TYPE_INSTANCE] = anv_instance_destructor,
   [VK_OBJECT_TYPE_PHYSICAL_DEVICE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_DEVICE] = anv_device_destructor,
   [VK_OBJECT_TYPE_QUEUE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_COMMAND_BUFFER] = anv_cmd_buffer_destructor,
   [VK_OBJECT_TYPE_PIPELINE] = anv_pipeline_destructor,
   [VK_OBJECT_TYPE_SHADER] = anv_free_destructor,
   [VK_OBJECT_TYPE_BUFFER] = anv_free_destructor,
   [VK_OBJECT_TYPE_IMAGE] = anv_free_destructor,
   [VK_OBJECT_TYPE_RENDER_PASS] = anv_free_destructor
};

VkResult VKAPI vkDestroyObject(
    VkDevice _device,
    VkObjectType objType,
    VkObject object)
{
   struct anv_device *device = (struct anv_device *) _device;

   assert(objType < ARRAY_SIZE(anv_object_destructors) &&
          anv_object_destructors[objType] != NULL);

   return anv_object_destructors[objType](device, object);
}

static void
fill_memory_requirements(
    VkObjectType objType,
    VkObject object,
    VkMemoryRequirements* memory_requirements)
{
   struct anv_buffer *buffer;
   struct anv_image *image;

   memory_requirements->memPropsAllowed =
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
      /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
      VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
      VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
      VK_MEMORY_PROPERTY_SHAREABLE_BIT;

   memory_requirements->memPropsRequired = 0;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      memory_requirements->size = buffer->size;
      memory_requirements->alignment = 16;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      memory_requirements->size = image->size;
      memory_requirements->alignment = image->alignment;
      break;
   default:
      memory_requirements->size = 0;
      memory_requirements->alignment = 1;
      break;
   }
}

VkResult VKAPI vkGetObjectInfo(
    VkDevice _device,
    VkObjectType objType,
    VkObject object,
    VkObjectInfoType infoType,
    size_t* pDataSize,
    void* pData)
{
   VkMemoryRequirements memory_requirements;

   switch (infoType) {
   case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
      fill_memory_requirements(objType, object, &memory_requirements);
      memcpy(pData, &memory_requirements,
             MIN2(*pDataSize, sizeof(memory_requirements)));
      *pDataSize = sizeof(memory_requirements);
      return VK_SUCCESS;

   case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkQueueBindObjectMemory(
    VkQueue queue,
    VkObjectType objType,
    VkObject object,
    uint32_t allocationIdx,
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)
{
   struct anv_buffer *buffer;
   struct anv_image *image;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      buffer->bo = &mem->bo;
      buffer->offset = memOffset;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      image->bo = &mem->bo;
      image->offset = memOffset;
      break;
   default:
      break;
   }

   return VK_SUCCESS;
}

VkResult VKAPI vkQueueBindObjectMemoryRange(
    VkQueue queue,
    VkObjectType objType,
    VkObject object,
    uint32_t allocationIdx,
    VkDeviceSize rangeOffset,
    VkDeviceSize rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize memOffset)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkQueueBindImageMemoryRange(
    VkQueue queue,
    VkImage image,
    uint32_t allocationIdx,
    const VkImageMemoryBindInfo* pBindInfo,
    VkDeviceMemory mem,
    VkDeviceSize memOffset)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkCreateFence(
    VkDevice device,
    const VkFenceCreateInfo* pCreateInfo,
    VkFence* pFence)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkResetFences(
    VkDevice device,
    uint32_t fenceCount,
    VkFence* pFences)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkGetFenceStatus(
    VkDevice device,
    VkFence fence)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkWaitForFences(
    VkDevice device,
    uint32_t fenceCount,
    const VkFence* pFences,
    bool32_t waitAll,
    uint64_t timeout)
{
   stub_return(VK_UNSUPPORTED);
}

// Queue semaphore functions

VkResult VKAPI vkCreateSemaphore(
    VkDevice device,
    const VkSemaphoreCreateInfo* pCreateInfo,
    VkSemaphore* pSemaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkQueueSignalSemaphore(
    VkQueue queue,
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkQueueWaitSemaphore(
    VkQueue queue,
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

// Event functions

VkResult VKAPI vkCreateEvent(
    VkDevice device,
    const VkEventCreateInfo* pCreateInfo,
    VkEvent* pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkGetEventStatus(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkSetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkResetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

// Query functions

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};

VkResult VKAPI vkCreateQueryPool(
    VkDevice _device,
    const VkQueryPoolCreateInfo* pCreateInfo,
    VkQueryPool* pQueryPool)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_query_pool *pool;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->type = pCreateInfo->queryType;
   pool->slots = pCreateInfo->slots;
   result = anv_bo_init_new(&pool->bo, device, pCreateInfo->slots * 16);
   if (result != VK_SUCCESS)
      goto fail;

   *pQueryPool = (VkQueryPool) pool;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pool);

   return result;
}

VkResult VKAPI vkGetQueryPoolResults(
    VkDevice device,
    VkQueryPool queryPool,
    uint32_t startQuery,
    uint32_t queryCount,
    size_t* pDataSize,
    void* pData,
    VkQueryResultFlags flags)
{
   stub_return(VK_UNSUPPORTED);
}

// Format capabilities

VkResult VKAPI vkGetFormatInfo(
    VkDevice device,
    VkFormat format,
    VkFormatInfoType infoType,
    size_t* pDataSize,
    void* pData)
{
   stub_return(VK_UNSUPPORTED);
}

// Buffer functions

VkResult VKAPI vkCreateBuffer(
    VkDevice _device,
    const VkBufferCreateInfo* pCreateInfo,
    VkBuffer* pBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = (VkBuffer) buffer;

   return VK_SUCCESS;
}

// Buffer view functions

VkResult VKAPI vkCreateBufferView(
    VkDevice _device,
    const VkBufferViewCreateInfo* pCreateInfo,
    VkBufferView* pView)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer_view *view;
   const struct anv_format *format;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   view = anv_device_alloc(device, sizeof(*view), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (view == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   view->buffer = (struct anv_buffer *) pCreateInfo->buffer;
   view->offset = pCreateInfo->offset;
   view->surface_state =
      anv_state_pool_alloc(&device->surface_state_pool, 64, 64);

   format = anv_format_for_vk_format(pCreateInfo->format);
   /* This assumes RGBA float format. */
   uint32_t stride = 4;
   uint32_t num_elements = pCreateInfo->range / stride;
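   /* For SURFTYPE_BUFFER the surface size is encoded as the element count
    * minus one, split across the Width (low 7 bits), Height (next 14 bits)
    * and Depth fields, matching SurfacePitch below which holds stride - 1.
    */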
   struct GEN8_RENDER_SURFACE_STATE surface_state = {
      .SurfaceType = SURFTYPE_BUFFER,
      .SurfaceArray = false,
      .SurfaceFormat = format->format,
      .SurfaceVerticalAlignment = VALIGN4,
      .SurfaceHorizontalAlignment = HALIGN4,
      .TileMode = LINEAR,
      .VerticalLineStride = 0,
      .VerticalLineStrideOffset = 0,
      .SamplerL2BypassModeDisable = true,
      .RenderCacheReadWriteMode = WriteOnlyCache,
      .MemoryObjectControlState = 0, /* FIXME: MOCS */
      .BaseMipLevel = 0,
      .SurfaceQPitch = 0,
      .Height = ((num_elements - 1) >> 7) & 0x3fff,
      .Width = (num_elements - 1) & 0x7f,
      .Depth = ((num_elements - 1) >> 21) & 0x3f,
      .SurfacePitch = stride - 1,
      .MinimumArrayElement = 0,
      .NumberofMultisamples = MULTISAMPLECOUNT_1,
      .XOffset = 0,
      .YOffset = 0,
      .SurfaceMinLOD = 0,
      .MIPCountLOD = 0,
      .AuxiliarySurfaceMode = AUX_NONE,
      .RedClearColor = 0,
      .GreenClearColor = 0,
      .BlueClearColor = 0,
      .AlphaClearColor = 0,
      .ShaderChannelSelectRed = SCS_RED,
      .ShaderChannelSelectGreen = SCS_GREEN,
      .ShaderChannelSelectBlue = SCS_BLUE,
      .ShaderChannelSelectAlpha = SCS_ALPHA,
      .ResourceMinLOD = 0,
      /* FIXME: We assume that the image must be bound at this time. */
      .SurfaceBaseAddress = { NULL, view->buffer->offset + view->offset },
   };

   GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);

   *pView = (VkBufferView) view;

   return VK_SUCCESS;
}

// Sampler functions

struct anv_sampler {
   uint32_t state[4];
};

VkResult VKAPI vkCreateSampler(
    VkDevice _device,
    const VkSamplerCreateInfo* pCreateInfo,
    VkSampler* pSampler)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = anv_device_alloc(device, sizeof(*sampler), 8,
                              VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!sampler)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN8_SAMPLER_STATE sampler_state = {
      .SamplerDisable = 0,
      .TextureBorderColorMode = 0,
      .LODPreClampMode = 0,
      .BaseMipLevel = 0,
      .MipModeFilter = 0,
      .MagModeFilter = 0,
      .MinModeFilter = 0,
      .TextureLODBias = 0,
      .AnisotropicAlgorithm = 0,
      .MinLOD = 0,
      .MaxLOD = 0,
      .ChromaKeyEnable = 0,
      .ChromaKeyIndex = 0,
      .ChromaKeyMode = 0,
      .ShadowFunction = 0,
      .CubeSurfaceControlMode = 0,
      .IndirectStatePointer = 0,
      .LODClampMagnificationMode = 0,
      .MaximumAnisotropy = 0,
      .RAddressMinFilterRoundingEnable = 0,
      .RAddressMagFilterRoundingEnable = 0,
      .VAddressMinFilterRoundingEnable = 0,
      .VAddressMagFilterRoundingEnable = 0,
      .UAddressMinFilterRoundingEnable = 0,
      .UAddressMagFilterRoundingEnable = 0,
      .TrilinearFilterQuality = 0,
      .NonnormalizedCoordinateEnable = 0,
      .TCXAddressControlMode = 0,
      .TCYAddressControlMode = 0,
      .TCZAddressControlMode = 0,
   };

   GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);

   *pSampler = (VkSampler) sampler;

   return VK_SUCCESS;
}

// Descriptor set functions

VkResult VKAPI vkCreateDescriptorSetLayout(
    VkDevice _device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    VkDescriptorSetLayout* pSetLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_descriptor_set_layout *set_layout;
   uint32_t count, k;
   size_t size, total;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   count = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++)
      count += pCreateInfo->pBinding[i].count;

   size = sizeof(*set_layout) +
      count * sizeof(set_layout->bindings[0]);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

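   /* `count` is the total number of descriptors, while `total` counts
    * per-stage slots: a descriptor occupies one slot in each shader stage
    * that references it, hence the popcount of stageFlags below.
    */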
   k = 0;
   total = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
         set_layout->bindings[k].mask = pCreateInfo->pBinding[i].stageFlags;
         set_layout->bindings[k].type = pCreateInfo->pBinding[i].descriptorType;
         k++;
      }

      total += pCreateInfo->pBinding[i].count *
         __builtin_popcount(pCreateInfo->pBinding[i].stageFlags);
   }

   set_layout->total = total;
   set_layout->count = count;

   *pSetLayout = (VkDescriptorSetLayout) set_layout;

   return VK_SUCCESS;
}

VkResult VKAPI vkBeginDescriptorPoolUpdate(
    VkDevice device,
    VkDescriptorUpdateMode updateMode)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkEndDescriptorPoolUpdate(
    VkDevice device,
    VkCmdBuffer cmd)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkCreateDescriptorPool(
    VkDevice device,
    VkDescriptorPoolUsage poolUsage,
    uint32_t maxSets,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    VkDescriptorPool* pDescriptorPool)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkResetDescriptorPool(
    VkDevice device,
    VkDescriptorPool descriptorPool)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult VKAPI vkAllocDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    VkDescriptorSetUsage setUsage,
    uint32_t count,
    const VkDescriptorSetLayout* pSetLayouts,
    VkDescriptorSet* pDescriptorSets,
    uint32_t* pCount)
{
   struct anv_device *device = (struct anv_device *) _device;
   const struct anv_descriptor_set_layout *layout;
   struct anv_descriptor_set *set;
   size_t size;

   for (uint32_t i = 0; i < count; i++) {
      layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
      size = sizeof(*set) + layout->total * sizeof(set->descriptors[0]);
      set = anv_device_alloc(device, size, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      if (!set) {
         *pCount = i;
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      pDescriptorSets[i] = (VkDescriptorSet) set;
   }

   *pCount = count;

   return VK_SUCCESS;
}

void VKAPI vkClearDescriptorSets(
    VkDevice device,
    VkDescriptorPool descriptorPool,
    uint32_t count,
    const VkDescriptorSet* pDescriptorSets)
{
   stub();
}

void VKAPI vkUpdateDescriptors(
    VkDevice _device,
    VkDescriptorSet descriptorSet,
    uint32_t updateCount,
    const void** ppUpdateArray)
{
   struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
   VkUpdateSamplers *update_samplers;
   VkUpdateSamplerTextures *update_sampler_textures;
   VkUpdateImages *update_images;
   VkUpdateBuffers *update_buffers;
   VkUpdateAsCopy *update_as_copy;

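   /* Every Vk*Update* struct begins with the common sType/pNext header, so
    * we can safely read it through anv_common and dispatch on sType.
    */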
   for (uint32_t i = 0; i < updateCount; i++) {
      const struct anv_common *common = ppUpdateArray[i];

      switch (common->sType) {
      case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
         update_samplers = (VkUpdateSamplers *) common;

         for (uint32_t j = 0; j < update_samplers->count; j++) {
            set->descriptors[update_samplers->binding + j] =
               (void *) update_samplers->pSamplers[j];
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
         /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
         update_sampler_textures = (VkUpdateSamplerTextures *) common;

         for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
            set->descriptors[update_sampler_textures->binding + j] =
               (void *) update_sampler_textures->pSamplerImageViews[j].pImageView->view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
         update_images = (VkUpdateImages *) common;

         for (uint32_t j = 0; j < update_images->count; j++) {
            set->descriptors[update_images->binding + j] =
               (void *) update_images->pImageViews[j].view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
         update_buffers = (VkUpdateBuffers *) common;

         for (uint32_t j = 0; j < update_buffers->count; j++) {
            set->descriptors[update_buffers->binding + j] =
               (void *) update_buffers->pBufferViews[j].view;
         }
         /* FIXME: descriptor arrays? */
         break;

      case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
         update_as_copy = (VkUpdateAsCopy *) common;
         (void) update_as_copy;
         break;

      default:
         break;
      }
   }
}

// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

VkResult VKAPI vkCreateDynamicViewportState(
    VkDevice _device,
    const VkDynamicVpStateCreateInfo* pCreateInfo,
    VkDynamicVpState* pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dyn_state_pool,
                                            count * 64, 64);
   state->cc_vp = anv_state_pool_alloc(&device->dyn_state_pool,
                                       count * 8, 32);
   state->scissor = anv_state_pool_alloc(&device->dyn_state_pool,
                                         count * 32, 32);

   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect *s = &pCreateInfo->pScissors[i];

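      /* m00/m11/m22 scale and m30/m31/m32 translate NDC into window
       * coordinates; the XMin/XMax and YMin/YMax fields give the viewport
       * extent with inclusive bounds.
       */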
      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN8_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width and height
       * are all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax,
       * which isn't what we want. Just special case empty clips and
       * produce a canonical empty clip. */
      static const struct GEN8_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN8_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      /* CC_VIEWPORT entries are 8 bytes each, matching the allocation above. */
      GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
      } else {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
      }
   }

   *pState = (VkDynamicVpState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicRasterState(
    VkDevice _device,
    const VkDynamicRsStateCreateInfo* pCreateInfo,
    VkDynamicRsState* pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_rs_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Missing these:
    * float depthBias;
    * float depthBiasClamp;
    * float slopeScaledDepthBias;
    * float pointFadeThreshold;
    * // optional (GL45) - Size of point fade threshold
    */

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .LineWidth = pCreateInfo->lineWidth,
      .PointWidth = pCreateInfo->pointSize,
   };

   GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);

   *pState = (VkDynamicRsState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicColorBlendState(
    VkDevice _device,
    const VkDynamicCbStateCreateInfo* pCreateInfo,
    VkDynamicCbState* pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *pState = (VkDynamicCbState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicDepthStencilState(
    VkDevice device,
    const VkDynamicDsStateCreateInfo* pCreateInfo,
    VkDynamicDsState* pState)
{
   stub_return(VK_UNSUPPORTED);
}

// Command buffer functions

VkResult VKAPI vkCreateCommandBuffer(
    VkDevice _device,
    const VkCmdBufferCreateInfo* pCreateInfo,
    VkCmdBuffer* pCmdBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;
   cmd_buffer->rs_state = NULL;
   cmd_buffer->vp_state = NULL;

   result = anv_batch_init(&cmd_buffer->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

   cmd_buffer->exec2_objects =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_objects == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch;
   }

   cmd_buffer->exec2_bos =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_bos == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_exec2_objects;
   }

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);

   cmd_buffer->dirty = 0;
   cmd_buffer->vb_dirty = 0;

   *pCmdBuffer = (VkCmdBuffer) cmd_buffer;

   return VK_SUCCESS;

 fail_exec2_objects:
   anv_device_free(device, cmd_buffer->exec2_objects);
 fail_batch:
   anv_batch_finish(&cmd_buffer->batch, device);
 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}

VkResult VKAPI vkBeginCommandBuffer(
    VkCmdBuffer cmdBuffer,
    const VkCmdBufferBeginInfo* pBeginInfo)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
                  .PipelineSelection = _3D);
   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);

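   /* Set up the base addresses so that state emitted later can be
    * referenced by its byte offset into the corresponding block pool:
    * surface state, dynamic state and shader kernels each get their own
    * base.
    */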
   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
                  .GeneralStateBaseAddress = { NULL, 0 },
                  .GeneralStateBaseAddressModifyEnable = true,
                  .GeneralStateBufferSize = 0xfffff,
                  .GeneralStateBufferSizeModifyEnable = true,

                  .SurfaceStateBaseAddress = { &device->surface_state_block_pool.bo, 0 },
                  .SurfaceStateMemoryObjectControlState = 0, /* FIXME: MOCS */
                  .SurfaceStateBaseAddressModifyEnable = true,

                  .DynamicStateBaseAddress = { &device->dyn_state_block_pool.bo, 0 },
                  .DynamicStateBaseAddressModifyEnable = true,
                  .DynamicStateBufferSize = 0xfffff,
                  .DynamicStateBufferSizeModifyEnable = true,

                  .IndirectObjectBaseAddress = { NULL, 0 },
                  .IndirectObjectBaseAddressModifyEnable = true,
                  .IndirectObjectBufferSize = 0xfffff,
                  .IndirectObjectBufferSizeModifyEnable = true,

                  .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
                  .InstructionBaseAddressModifyEnable = true,
                  .InstructionBufferSize = 0xfffff,
                  .InstructionBuffersizeModifyEnable = true);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

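   /* Partition the push constant space between VS, GS and PS: each gets a
    * 4-unit chunk at offsets 0, 4 and 8. HS and DS get nothing, since
    * tessellation is disabled above.
    */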
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   /* Hardcoded state: */
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
                  .SurfaceType = SURFTYPE_2D,
                  .Width = 1,
                  .Height = 1,
                  .SurfaceFormat = D16_UNORM,
                  .SurfaceBaseAddress = { NULL, 0 },
                  .HierarchicalDepthBufferEnable = 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_DEPTH_STENCIL,
                  .DepthTestEnable = false,
                  .DepthBufferWriteEnable = false);

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo, struct anv_reloc_list *list)
{
   struct drm_i915_gem_exec_object2 *obj;

   bo->index = cmd_buffer->bo_count;
   obj = &cmd_buffer->exec2_objects[bo->index];
   cmd_buffer->exec2_bos[bo->index] = bo;
   cmd_buffer->bo_count++;

   obj->handle = bo->gem_handle;
   obj->relocation_count = 0;
   obj->relocs_ptr = 0;
   obj->alignment = 0;
   obj->offset = bo->offset;
   obj->flags = 0;
   obj->rsvd1 = 0;
   obj->rsvd2 = 0;

   if (list) {
      obj->relocation_count = list->num_relocs;
      obj->relocs_ptr = (uintptr_t) list->relocs;
   }
}

static void
anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_reloc_list *list)
{
   struct anv_bo *bo, *batch_bo;

   batch_bo = &cmd_buffer->batch.bo;
   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      /* Skip any relocations targeting the batch bo. We need to make sure
       * it's the last in the list so we'll add it manually later.
       */
      if (bo == batch_bo)
         continue;
      if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
         continue;

      anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
   }
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare the offset
    * in struct drm_i915_gem_exec_object2 against the bo's current offset
    * and, if none of the bos have moved, skip relocation processing
    * altogether. If I915_EXEC_NO_RELOC is not supported, the kernel
    * ignores the incoming value of offset, so we can set it either way.
    * For that to work we need to make sure all relocs use the same
    * presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}
1931
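/* A minimal sketch (hypothetical helper, not driver code) of the contract
 * described above: with I915_EXEC_NO_RELOC set, the kernel only has to
 * process a bo's relocations when the presumed offset has gone stale.
 */
static inline bool
example_bo_needs_reloc(const struct drm_i915_gem_exec_object2 *obj,
                       uint64_t current_offset)
{
   /* obj->offset is the offset userspace presumed when writing the batch;
    * if the bo still lives there, every reloc value is already correct. */
   return obj->offset != current_offset;
}
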
VkResult VKAPI vkEndCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;
   struct anv_batch *batch = &cmd_buffer->batch;

   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);

   /* Round the batch up to an even number of dwords: if bit 2 of the byte
    * length is set, the dword count is odd, so pad with an MI_NOOP to end
    * the batch on a QWord boundary. */
   if ((batch->next - batch->bo.map) & 4)
      anv_batch_emit(batch, GEN8_MI_NOOP);

   cmd_buffer->bo_count = 0;
   cmd_buffer->need_reloc = false;

   /* Lock for access to bo->index. */
   pthread_mutex_lock(&device->mutex);

   /* Add block pool bos first so we can add them with their relocs. */
   anv_cmd_buffer_add_bo(cmd_buffer, &device->surface_state_block_pool.bo,
                         &batch->surf_relocs);

   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->surf_relocs);
   anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);

   cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
   cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
   cmd_buffer->execbuf.batch_start_offset = 0;
   cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
   cmd_buffer->execbuf.cliprects_ptr = 0;
   cmd_buffer->execbuf.num_cliprects = 0;
   cmd_buffer->execbuf.DR1 = 0;
   cmd_buffer->execbuf.DR4 = 0;

   cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
   if (!cmd_buffer->need_reloc)
      cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
   cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
   cmd_buffer->execbuf.rsvd1 = device->context_id;
   cmd_buffer->execbuf.rsvd2 = 0;

   pthread_mutex_unlock(&device->mutex);

   return VK_SUCCESS;
}
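
/* Sketch of how the execbuf filled in above is consumed at submit time
 * (hypothetical helper; the real ioctl call lives in the queue submit
 * path, and <sys/ioctl.h> is assumed to be available):
 */
static inline int
example_submit_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
   /* The kernel validates the bo list, applies any stale relocations and
    * schedules the batch on the render ring. */
   return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}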

VkResult VKAPI vkResetCommandBuffer(
    VkCmdBuffer                                 cmdBuffer)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_batch_reset(&cmd_buffer->batch);

   return VK_SUCCESS;
}

// Command buffer building functions

void VKAPI vkCmdBindPipeline(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   cmd_buffer->pipeline = (struct anv_pipeline *) _pipeline;
   cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
}

void VKAPI vkCmdBindDynamicStateObject(
    VkCmdBuffer                                 cmdBuffer,
    VkStateBindPoint                            stateBindPoint,
    VkDynamicStateObject                        dynamicState)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_dynamic_vp_state *vp_state;

   switch (stateBindPoint) {
   case VK_STATE_BIND_POINT_VIEWPORT:
      vp_state = (struct anv_dynamic_vp_state *) dynamicState;
      /* We emit the viewport state immediately, but still set
       * cmd_buffer->vp_state to indicate that vp state has been set in
       * this command buffer. */
      cmd_buffer->vp_state = vp_state;
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
                     .ScissorRectPointer = vp_state->scissor.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
                     .CCViewportPointer = vp_state->cc_vp.offset);
      anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
                     .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
      break;
   case VK_STATE_BIND_POINT_RASTER:
      cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
      cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
      break;
   case VK_STATE_BIND_POINT_COLOR_BLEND:
   case VK_STATE_BIND_POINT_DEPTH_STENCIL:
      break;
   default:
      break;
   }
}

void VKAPI vkCmdBindDescriptorSets(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    firstSet,
    uint32_t                                    setCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   /* What are the semantics for setting descriptor sets? We assume that
    * setting preserves the lower sets and invalidates the higher sets.
    * This means we can set the number of active sets to
    * firstSet + setCount.
    */

   for (uint32_t i = 0; i < setCount; i++)
      cmd_buffer->descriptor_sets[firstSet + i] =
         (struct anv_descriptor_set *) pDescriptorSets[i];

   cmd_buffer->num_descriptor_sets = firstSet + setCount;
   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
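
/* Worked example of the rule assumed above (illustrative comment only):
 * if four sets are active and the application rebinds a single set at
 * firstSet = 1, sets 2 and 3 are treated as invalidated and
 * num_descriptor_sets drops to firstSet + setCount = 2. */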

void VKAPI vkCmdBindIndexBuffer(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;

   static const uint32_t vk_to_gen_index_type[] = {
      [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
      [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
      [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
   };

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
                  .IndexFormat = vk_to_gen_index_type[indexType],
                  .MemoryObjectControlState = 0,
                  .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                  .BufferSize = buffer->size - offset);
}

void VKAPI vkCmdBindVertexBuffers(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   /* We have to defer setting up the vertex buffers, since we need the
    * buffer stride from the pipeline (see the note after this function). */

   for (uint32_t i = 0; i < bindingCount; i++) {
      cmd_buffer->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
      cmd_buffer->vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->vb_dirty |= 1 << (startBinding + i);
   }
}

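/* The deferred bindings above are consumed in anv_cmd_buffer_flush_state()
 * further below, which packs one GEN8_VERTEX_BUFFER_STATE per dirty
 * binding, using pipeline->binding_stride[vb] as the BufferPitch. */
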
static void
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t opcodes[] = {
      [VK_SHADER_STAGE_VERTEX] = 38,
      [VK_SHADER_STAGE_TESS_CONTROL] = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY] = 41,
      [VK_SHADER_STAGE_FRAGMENT] = 42,
      [VK_SHADER_STAGE_COMPUTE] = 0,
   };

   struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
   struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;

   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {

      uint32_t bias = s == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
      uint32_t count, *table;
      struct anv_state table_state;

      if (layout)
         count = layout->stage[s].count + bias;
      else if (s == VK_SHADER_STAGE_FRAGMENT)
         count = framebuffer->color_attachment_count;
      else
         count = 0;

      if (count == 0)
         continue;

      table_state = anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                           count * 4, 32);
      table = table_state.map;

      if (s == VK_SHADER_STAGE_FRAGMENT) {
         for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
            struct anv_color_attachment_view *view = framebuffer->color_attachments[i];
            table[i] = view->surface_state.offset;

            /* Don't write the reloc back to the surface state; we do that
             * at submit time. The surface address lives in dwords 8-9. */
            anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                               view->surface_state.offset + 8 * sizeof(int32_t),
                               view->image->bo, view->image->offset);
         }
      }

      if (layout) {
         for (uint32_t i = 0; i < layout->stage[s].count; i++) {
            struct anv_pipeline_layout_entry *e = &layout->stage[s].entries[i];
            struct anv_image_view *image_view;
            struct anv_buffer_view *buffer_view;
            void *d = cmd_buffer->descriptor_sets[e->set]->descriptors[e->index];

            switch (e->type) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
               break;
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
               image_view = d;
               table[bias + i] = image_view->surface_state.offset;
               anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                                  image_view->surface_state.offset + 8 * sizeof(int32_t),
                                  image_view->image->bo,
                                  image_view->image->offset);
               break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
               /* FIXME: What are these? TBOs? */
               break;

            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
               buffer_view = d;
               table[bias + i] = buffer_view->surface_state.offset;
               anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
                                  buffer_view->surface_state.offset + 8 * sizeof(int32_t),
                                  buffer_view->buffer->bo,
                                  buffer_view->buffer->offset + buffer_view->offset);
               break;

            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
               break;
            default:
               break;
            }
         }
      }

      /* FIXME: Samplers */

      /* The binding table pointer commands all have the same structure,
       * only the opcode differs.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode = opcodes[s],
                     .PointertoVSBindingTable = table_state.offset);
   }
}
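
/* Note on the opcode override above (illustrative): reusing the VS packet
 * template with ._3DCommandSubOpcode patched retargets the write to
 * another stage, e.g. opcodes[VK_SHADER_STAGE_FRAGMENT] = 42 turns the
 * packet into 3DSTATE_BINDING_TABLE_POINTERS_PS. */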

static void
anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->pipeline;
   const uint32_t num_buffers = __builtin_popcount(cmd_buffer->vb_dirty);
   const uint32_t num_dwords = 1 + num_buffers * 4;
   uint32_t *p;

   if (cmd_buffer->vb_dirty) {
      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN8_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, cmd_buffer->vb_dirty) {
         struct anv_buffer *buffer = cmd_buffer->vb[vb].buffer;
         uint32_t offset = cmd_buffer->vb[vb].offset;

         struct GEN8_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .MemoryObjectControlState = 0,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .BufferSize = buffer->size - offset
         };

         GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
      flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY))
      anv_batch_emit_merge(&cmd_buffer->batch,
                           cmd_buffer->rs_state->state_sf, pipeline->state_sf);

   cmd_buffer->vb_dirty = 0;
   cmd_buffer->dirty = 0;
}
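
/* For reference, a typical shape for the for_each_bit() helper used above
 * (a sketch; the driver's actual macro lives in private.h and may differ):
 * visit each set bit of a mask, lowest bit first.
 */
#define EXAMPLE_FOR_EACH_BIT(b, mask)                         \
   for (uint32_t __m = (mask);                                \
        __m && ((b) = __builtin_ffs(__m) - 1, true);          \
        __m &= __m - 1)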

void VKAPI vkCmdDraw(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstVertex,
    uint32_t                                    vertexCount,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = SEQUENTIAL,
                  .VertexCountPerInstance = vertexCount,
                  .StartVertexLocation = firstVertex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = 0);
}

void VKAPI vkCmdDrawIndexed(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    firstIndex,
    uint32_t                                    indexCount,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance,
    uint32_t                                    instanceCount)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .VertexAccessType = RANDOM,
                  .VertexCountPerInstance = indexCount,
                  .StartVertexLocation = firstIndex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = vertexOffset);
}

static void
anv_batch_lrm(struct anv_batch *batch,
              uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
}

static void
anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}

/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440

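/* The register loads in the two functions below consume indirect draw
 * arguments with the standard Vulkan layout (sketch for reference):
 *
 *    struct VkDrawIndirectCommand {
 *       uint32_t vertexCount;      // -> GEN7_3DPRIM_VERTEX_COUNT
 *       uint32_t instanceCount;    // -> GEN7_3DPRIM_INSTANCE_COUNT
 *       uint32_t firstVertex;      // -> GEN7_3DPRIM_START_VERTEX
 *       uint32_t firstInstance;    // -> GEN7_3DPRIM_START_INSTANCE
 *    };
 *
 * The indexed variant inserts a vertexOffset (base vertex) field before
 * firstInstance, which is why the dword offsets differ between the two.
 */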
void VKAPI vkCmdDrawIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = SEQUENTIAL);
}

void VKAPI vkCmdDrawIndexedIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    count,
    uint32_t                                    stride)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   anv_cmd_buffer_flush_state(cmd_buffer);

   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = RANDOM);
}

void VKAPI vkCmdDispatch(
    VkCmdBuffer                                 cmdBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   stub();
}

void VKAPI vkCmdDispatchIndirect(
    VkCmdBuffer                                 cmdBuffer,
    VkBuffer                                    buffer,
    VkDeviceSize                                offset)
{
   stub();
}

void VKAPI vkCmdSetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
   stub();
}

void VKAPI vkCmdResetEvent(
    VkCmdBuffer                                 cmdBuffer,
    VkEvent                                     event,
    VkPipeEvent                                 pipeEvent)
{
   stub();
}

void VKAPI vkCmdWaitEvents(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
   stub();
}

void VKAPI vkCmdPipelineBarrier(
    VkCmdBuffer                                 cmdBuffer,
    VkWaitEvent                                 waitEvent,
    uint32_t                                    pipeEventCount,
    const VkPipeEvent*                          pPipeEvents,
    uint32_t                                    memBarrierCount,
    const void**                                ppMemBarriers)
{
   stub();
}

static void
anv_batch_emit_ps_depth_count(struct anv_batch *batch,
                              struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN8_PIPE_CONTROL,
                  .DestinationAddressType = DAT_PPGTT,
                  .PostSyncOperation = WritePSDepthCount,
                  .Address = { bo, offset }); /* FIXME: This is only the lower 32 bits */
}

void VKAPI vkCmdBeginQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot,
    VkQueryControlFlags                         flags)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      break;

   default:
      break;
   }
}

void VKAPI vkCmdEndQuery(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    slot)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16 + 8);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      break;

   default:
      break;
   }
}
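
/* A minimal sketch (hypothetical helper, not driver code) of how the two
 * depth counts written above become an occlusion query result: each slot
 * holds a 64-bit begin count at slot * 16 and an end count at
 * slot * 16 + 8, so the result is their difference once both writes have
 * landed.
 */
static inline uint64_t
example_occlusion_result(const uint64_t *pool_map, uint32_t slot)
{
   return pool_map[slot * 2 + 1] - pool_map[slot * 2];
}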

void VKAPI vkCmdResetQueryPool(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount)
{
   stub();
}

#define TIMESTAMP 0x44070

void VKAPI vkCmdWriteTimestamp(
    VkCmdBuffer                                 cmdBuffer,
    VkTimestampType                             timestampType,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
   struct anv_bo *bo = buffer->bo;

   switch (timestampType) {
   case VK_TIMESTAMP_TYPE_TOP:
      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
                     .RegisterAddress = TIMESTAMP,
                     .MemoryAddress = { bo, buffer->offset + destOffset });
      break;

   case VK_TIMESTAMP_TYPE_BOTTOM:
      anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
                     .DestinationAddressType = DAT_PPGTT,
                     .PostSyncOperation = WriteTimestamp,
                     .Address = /* FIXME: This is only the lower 32 bits */
                        { bo, buffer->offset + destOffset });
      break;

   default:
      break;
   }
}

void VKAPI vkCmdCopyQueryPoolResults(
    VkCmdBuffer                                 cmdBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   stub();
}

void VKAPI vkCmdInitAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    const uint32_t*                             pData)
{
   stub();
}

void VKAPI vkCmdLoadAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    srcBuffer,
    VkDeviceSize                                srcOffset)
{
   stub();
}

void VKAPI vkCmdSaveAtomicCounters(
    VkCmdBuffer                                 cmdBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    uint32_t                                    startCounter,
    uint32_t                                    counterCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset)
{
   stub();
}

VkResult VKAPI vkCreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    VkFramebuffer*                              pFramebuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
      framebuffer->color_attachments[i] =
         (struct anv_color_attachment_view *) pCreateInfo->pColorAttachments[i].view;
   }

   if (pCreateInfo->pDepthStencilAttachment) {
      framebuffer->depth_stencil =
         (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
   }

   framebuffer->sample_count = pCreateInfo->sampleCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   /* Create a default viewport state that covers the whole framebuffer;
    * we pass exactly one viewport and one scissor rect. */
   vkCreateDynamicViewportState((VkDevice) device,
      &(VkDynamicVpStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
         .viewportAndScissorCount = 1,
         .pViewports = (VkViewport[]) {
            {
               .originX = 0,
               .originY = 0,
               .width = pCreateInfo->width,
               .height = pCreateInfo->height,
               .minDepth = 0,
               .maxDepth = 1
            },
         },
         .pScissors = (VkRect[]) {
            { { 0, 0 },
              { pCreateInfo->width, pCreateInfo->height } },
         }
      },
      &framebuffer->vp_state);

   *pFramebuffer = (VkFramebuffer) framebuffer;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateRenderPass(
    VkDevice                                    _device,
    const VkRenderPassCreateInfo*               pCreateInfo,
    VkRenderPass*                               pRenderPass)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_render_pass *pass;
   size_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);

   size = sizeof(*pass) +
      pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
   pass = anv_device_alloc(device, size, 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pass == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pass->render_area = pCreateInfo->renderArea;

   pass->num_layers = pCreateInfo->layers;

   pass->num_clear_layers = 0;
   for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
      pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
      pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
      if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
         pass->num_clear_layers++;
   }

   *pRenderPass = (VkRenderPass) pass;

   return VK_SUCCESS;
}
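
/* The size computed above assumes the flexible-array idiom for the pass
 * object; a sketch of the presumed shape (the real definition lives in
 * private.h and may differ):
 *
 *    struct anv_render_pass {
 *       VkRect                         render_area;
 *       uint32_t                       num_layers;
 *       uint32_t                       num_clear_layers;
 *       struct anv_render_pass_layer   layers[0];
 *    };
 */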

void VKAPI vkCmdBeginRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    const VkRenderPassBegin*                    pRenderPassBegin)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_render_pass *pass = (struct anv_render_pass *) pRenderPassBegin->renderPass;

   cmd_buffer->framebuffer = (struct anv_framebuffer *) pRenderPassBegin->framebuffer;
   cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
                  .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
                  .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
                  .ClippedDrawingRectangleYMax =
                     pass->render_area.offset.y + pass->render_area.extent.height - 1,
                  .ClippedDrawingRectangleXMax =
                     pass->render_area.offset.x + pass->render_area.extent.width - 1,
                  .DrawingRectangleOriginY = 0,
                  .DrawingRectangleOriginX = 0);

   anv_cmd_buffer_clear(cmd_buffer, pass);
}

void VKAPI vkCmdEndRenderPass(
    VkCmdBuffer                                 cmdBuffer,
    VkRenderPass                                renderPass)
{
   stub();
}