/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include "private.h"
static int
anv_env_get_int(const char *name)
{
const char *val = getenv(name);
if (!val)
return 0;
return strtol(val, NULL, 0);
}
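/* Probe the render node and fill in the physical device description. The
 * required kernel features (wait-timeout, execbuf2, LLC and exec-constants
 * parameters) are checked up front so device creation can fail early with
 * VK_ERROR_UNAVAILABLE.
 */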
static VkResult
fill_physical_device(struct anv_physical_device *device,
struct anv_instance *instance,
const char *path)
{
int fd;
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0)
return vk_error(VK_ERROR_UNAVAILABLE);
device->instance = instance;
device->path = path;
device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
device->no_hw = false;
if (device->chipset_id) {
/* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
device->no_hw = true;
} else {
device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
}
if (!device->chipset_id)
goto fail;
device->name = brw_get_device_name(device->chipset_id);
device->info = brw_get_device_info(device->chipset_id, -1);
if (!device->info)
goto fail;
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
goto fail;
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
goto fail;
if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
goto fail;
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
goto fail;
close(fd);
return VK_SUCCESS;
fail:
close(fd);
return vk_error(VK_ERROR_UNAVAILABLE);
}
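/* Default allocation callbacks used when the application does not supply its
 * own VkAllocCallbacks. These simply forward to malloc()/free() and ignore
 * the requested alignment and allocation type.
 */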
static void *default_alloc(
void* pUserData,
size_t size,
size_t alignment,
VkSystemAllocType allocType)
{
return malloc(size);
}
static void default_free(
void* pUserData,
void* pMem)
{
free(pMem);
}
static const VkAllocCallbacks default_alloc_callbacks = {
.pUserData = NULL,
.pfnAlloc = default_alloc,
.pfnFree = default_free
};
VkResult anv_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
VkInstance* pInstance)
{
struct anv_instance *instance;
const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
void *user_data = NULL;
VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
if (pCreateInfo->pAllocCb) {
alloc_callbacks = pCreateInfo->pAllocCb;
user_data = pCreateInfo->pAllocCb->pUserData;
}
instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!instance)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
instance->pAllocUserData = alloc_callbacks->pUserData;
instance->pfnAlloc = alloc_callbacks->pfnAlloc;
instance->pfnFree = alloc_callbacks->pfnFree;
instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
instance->physicalDeviceCount = 0;
result = fill_physical_device(&instance->physicalDevice,
instance, "/dev/dri/renderD128");
if (result != VK_SUCCESS)
return result;
instance->physicalDeviceCount++;
*pInstance = (VkInstance) instance;
return VK_SUCCESS;
}
VkResult anv_DestroyInstance(
VkInstance _instance)
{
struct anv_instance *instance = (struct anv_instance *) _instance;
instance->pfnFree(instance->pAllocUserData, instance);
return VK_SUCCESS;
}
VkResult anv_EnumeratePhysicalDevices(
VkInstance _instance,
uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices)
{
struct anv_instance *instance = (struct anv_instance *) _instance;
if (*pPhysicalDeviceCount >= 1)
pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
*pPhysicalDeviceCount = instance->physicalDeviceCount;
return VK_SUCCESS;
}
VkResult anv_GetPhysicalDeviceInfo(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceInfoType infoType,
size_t* pDataSize,
void* pData)
{
struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
VkPhysicalDeviceProperties *properties;
VkPhysicalDevicePerformance *performance;
VkPhysicalDeviceQueueProperties *queue_properties;
VkPhysicalDeviceMemoryProperties *memory_properties;
uint64_t ns_per_tick = 80;
switch (infoType) {
case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
properties = pData;
*pDataSize = sizeof(*properties);
if (pData == NULL)
return VK_SUCCESS;
properties->apiVersion = 1;
properties->driverVersion = 1;
properties->vendorId = 0x8086;
properties->deviceId = device->chipset_id;
properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
strcpy(properties->deviceName, device->name);
properties->maxInlineMemoryUpdateSize = 0;
properties->maxBoundDescriptorSets = MAX_SETS;
properties->maxThreadGroupSize = 512;
properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
properties->multiColorAttachmentClears = true;
properties->maxDescriptorSets = 8;
properties->maxViewports = 16;
properties->maxColorAttachments = 8;
return VK_SUCCESS;
case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
performance = pData;
*pDataSize = sizeof(*performance);
if (pData == NULL)
return VK_SUCCESS;
performance->maxDeviceClock = 1.0;
performance->aluPerClock = 1.0;
performance->texPerClock = 1.0;
performance->primsPerClock = 1.0;
performance->pixelsPerClock = 1.0;
return VK_SUCCESS;
case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
queue_properties = pData;
*pDataSize = sizeof(*queue_properties);
if (pData == NULL)
return VK_SUCCESS;
queue_properties->queueFlags = 0;
queue_properties->queueCount = 1;
queue_properties->maxAtomicCounters = 0;
queue_properties->supportsTimestamps = true;
queue_properties->maxMemReferences = 256;
return VK_SUCCESS;
case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
memory_properties = pData;
*pDataSize = sizeof(*memory_properties);
if (pData == NULL)
return VK_SUCCESS;
memory_properties->supportsMigration = false;
memory_properties->supportsPinning = false;
return VK_SUCCESS;
default:
return VK_UNSUPPORTED;
}
}
void * vkGetProcAddr(
VkPhysicalDevice physicalDevice,
const char* pName)
{
return anv_lookup_entrypoint(pName);
}
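/* Parse the comma-separated INTEL_DEBUG environment variable. Only two flags
 * matter here: "aub" makes anv_QueueSubmit() dump command buffers via the AUB
 * writer and "no_hw" skips actual execbuffer submission.
 */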
static void
parse_debug_flags(struct anv_device *device)
{
const char *debug, *p, *end;
debug = getenv("INTEL_DEBUG");
device->dump_aub = false;
if (debug) {
for (p = debug; *p; p = end + 1) {
end = strchrnul(p, ',');
if (end - p == 3 && memcmp(p, "aub", 3) == 0)
device->dump_aub = true;
if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
device->no_hw = true;
if (*end == '\0')
break;
}
}
}
VkResult anv_CreateDevice(
VkPhysicalDevice _physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
VkDevice* pDevice)
{
struct anv_physical_device *physicalDevice =
(struct anv_physical_device *) _physicalDevice;
struct anv_instance *instance = physicalDevice->instance;
struct anv_device *device;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
device = instance->pfnAlloc(instance->pAllocUserData,
sizeof(*device), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!device)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
device->no_hw = physicalDevice->no_hw;
parse_debug_flags(device);
device->instance = physicalDevice->instance;
device->fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
if (device->fd == -1)
goto fail_device;
device->context_id = anv_gem_create_context(device);
if (device->context_id == -1)
goto fail_fd;
anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
anv_block_pool_init(&device->instruction_block_pool, device, 2048);
anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
/* Binding table pointers are only 16 bits so we have to make sure that
* they get allocated at the beginning of the surface state BO. To
* handle this, we create a separate block pool that works out of the
* first 64 KB of the surface state BO.
*/
anv_block_pool_init_slave(&device->binding_table_block_pool,
&device->surface_state_block_pool, 32);
anv_state_pool_init(&device->surface_state_pool,
&device->surface_state_block_pool);
device->compiler = anv_compiler_create(device->fd);
device->aub_writer = NULL;
device->info = *physicalDevice->info;
pthread_mutex_init(&device->mutex, NULL);
anv_device_init_meta(device);
*pDevice = (VkDevice) device;
return VK_SUCCESS;
fail_fd:
close(device->fd);
fail_device:
anv_device_free(device, device);
return vk_error(VK_ERROR_UNAVAILABLE);
}
VkResult anv_DestroyDevice(
VkDevice _device)
{
struct anv_device *device = (struct anv_device *) _device;
anv_compiler_destroy(device->compiler);
anv_block_pool_finish(&device->dynamic_state_block_pool);
anv_block_pool_finish(&device->instruction_block_pool);
anv_block_pool_finish(&device->surface_state_block_pool);
close(device->fd);
if (device->aub_writer)
anv_aub_writer_destroy(device->aub_writer);
anv_device_free(device, device);
return VK_SUCCESS;
}
VkResult anv_GetGlobalExtensionInfo(
VkExtensionInfoType infoType,
uint32_t extensionIndex,
size_t* pDataSize,
void* pData)
{
uint32_t *count;
switch (infoType) {
case VK_EXTENSION_INFO_TYPE_COUNT:
count = pData;
assert(*pDataSize == 4);
*count = 0;
return VK_SUCCESS;
case VK_EXTENSION_INFO_TYPE_PROPERTIES:
return vk_error(VK_ERROR_INVALID_EXTENSION);
default:
return VK_UNSUPPORTED;
}
}
VkResult anv_GetPhysicalDeviceExtensionInfo(
VkPhysicalDevice physicalDevice,
VkExtensionInfoType infoType,
uint32_t extensionIndex,
size_t* pDataSize,
void* pData)
{
uint32_t *count;
switch (infoType) {
case VK_EXTENSION_INFO_TYPE_COUNT:
*pDataSize = 4;
if (pData == NULL)
return VK_SUCCESS;
count = pData;
*count = 0;
return VK_SUCCESS;
case VK_EXTENSION_INFO_TYPE_PROPERTIES:
return vk_error(VK_ERROR_INVALID_EXTENSION);
default:
return VK_UNSUPPORTED;
}
}
VkResult anv_EnumerateLayers(
VkPhysicalDevice physicalDevice,
size_t maxStringSize,
size_t* pLayerCount,
char* const* pOutLayers,
void* pReserved)
{
*pLayerCount = 0;
return VK_SUCCESS;
}
VkResult anv_GetDeviceQueue(
VkDevice _device,
uint32_t queueNodeIndex,
uint32_t queueIndex,
VkQueue* pQueue)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_queue *queue;
/* FIXME: Should allocate these at device create time. */
queue = anv_device_alloc(device, sizeof(*queue), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (queue == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
queue->device = device;
queue->pool = &device->surface_state_pool;
queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
*(uint32_t *)queue->completed_serial.map = 0;
queue->next_serial = 1;
*pQueue = (VkQueue) queue;
return VK_SUCCESS;
}
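/* Batch buffer helpers. Each command buffer owns a fixed-size batch bo that
 * is mapped for CPU writes; commands are packed straight into the mapping
 * while relocations accumulate in the cmd_relocs/surf_relocs lists.
 */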
static const uint32_t BATCH_SIZE = 8192;
VkResult
anv_batch_init(struct anv_batch *batch, struct anv_device *device)
{
VkResult result;
result = anv_bo_init_new(&batch->bo, device, BATCH_SIZE);
if (result != VK_SUCCESS)
return result;
batch->bo.map =
anv_gem_mmap(device, batch->bo.gem_handle, 0, BATCH_SIZE);
if (batch->bo.map == NULL) {
anv_gem_close(device, batch->bo.gem_handle);
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
}
batch->cmd_relocs.num_relocs = 0;
batch->surf_relocs.num_relocs = 0;
batch->next = batch->bo.map;
return VK_SUCCESS;
}
void
anv_batch_finish(struct anv_batch *batch, struct anv_device *device)
{
anv_gem_munmap(batch->bo.map, BATCH_SIZE);
anv_gem_close(device, batch->bo.gem_handle);
}
void
anv_batch_reset(struct anv_batch *batch)
{
batch->next = batch->bo.map;
batch->cmd_relocs.num_relocs = 0;
batch->surf_relocs.num_relocs = 0;
}
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
void *p = batch->next;
batch->next += num_dwords * 4;
return p;
}
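/* Most code in this file does not call anv_batch_emit_dwords() directly but
 * goes through the anv_batch_emit() macro (e.g.
 * anv_batch_emit(&batch, GEN8_MI_NOOP)), which presumably reserves the dwords
 * for the named GEN8 command and packs it in place.
 */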
static void
anv_reloc_list_append(struct anv_reloc_list *list,
struct anv_reloc_list *other, uint32_t offset)
{
uint32_t i, count;
count = list->num_relocs;
memcpy(&list->relocs[count], &other->relocs[0],
other->num_relocs * sizeof(other->relocs[0]));
memcpy(&list->reloc_bos[count], &other->reloc_bos[0],
other->num_relocs * sizeof(other->reloc_bos[0]));
for (i = 0; i < other->num_relocs; i++)
list->relocs[i + count].offset += offset;
list->num_relocs = count + other->num_relocs;
}
static uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
uint32_t offset,
struct anv_bo *target_bo, uint32_t delta)
{
struct drm_i915_gem_relocation_entry *entry;
int index;
assert(list->num_relocs < ANV_BATCH_MAX_RELOCS);
/* XXX: Can we use I915_EXEC_HANDLE_LUT? */
index = list->num_relocs++;
list->reloc_bos[index] = target_bo;
entry = &list->relocs[index];
entry->target_handle = target_bo->gem_handle;
entry->delta = delta;
entry->offset = offset;
entry->presumed_offset = target_bo->offset;
entry->read_domains = 0;
entry->write_domain = 0;
return target_bo->offset + delta;
}
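/* Append the contents of a secondary batch to this one: the raw commands are
 * memcpy'd and both relocation lists are appended with their offsets re-based
 * by the copy destination so they still point into the right spot in the
 * combined batch.
 */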
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
uint32_t size, offset;
size = other->next - other->bo.map;
memcpy(batch->next, other->bo.map, size);
offset = batch->next - batch->bo.map;
anv_reloc_list_append(&batch->cmd_relocs, &other->cmd_relocs, offset);
anv_reloc_list_append(&batch->surf_relocs, &other->surf_relocs, offset);
batch->next += size;
}
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
void *location, struct anv_bo *bo, uint32_t delta)
{
return anv_reloc_list_add(&batch->cmd_relocs,
location - batch->bo.map, bo, delta);
}
VkResult anv_QueueSubmit(
VkQueue _queue,
uint32_t cmdBufferCount,
const VkCmdBuffer* pCmdBuffers,
VkFence _fence)
{
struct anv_queue *queue = (struct anv_queue *) _queue;
struct anv_device *device = queue->device;
struct anv_fence *fence = (struct anv_fence *) _fence;
int ret;
for (uint32_t i = 0; i < cmdBufferCount; i++) {
struct anv_cmd_buffer *cmd_buffer =
(struct anv_cmd_buffer *) pCmdBuffers[i];
if (device->dump_aub)
anv_cmd_buffer_dump(cmd_buffer);
if (!device->no_hw) {
ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
if (ret != 0)
return vk_error(VK_ERROR_UNKNOWN);
if (fence) {
ret = anv_gem_execbuffer(device, &fence->execbuf);
if (ret != 0)
return vk_error(VK_ERROR_UNKNOWN);
}
for (uint32_t j = 0; j < cmd_buffer->bo_count; j++)
cmd_buffer->exec2_bos[j]->offset = cmd_buffer->exec2_objects[j].offset;
} else {
*(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
}
}
return VK_SUCCESS;
}
VkResult anv_QueueAddMemReferences(
VkQueue queue,
uint32_t count,
const VkDeviceMemory* pMems)
{
return VK_SUCCESS;
}
VkResult anv_QueueRemoveMemReferences(
VkQueue queue,
uint32_t count,
const VkDeviceMemory* pMems)
{
return VK_SUCCESS;
}
VkResult anv_QueueWaitIdle(
VkQueue _queue)
{
struct anv_queue *queue = (struct anv_queue *) _queue;
return vkDeviceWaitIdle((VkDevice) queue->device);
}
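/* Wait for the GPU to go idle by submitting a trivial batch
 * (MI_BATCH_BUFFER_END + MI_NOOP) allocated from the dynamic state pool and
 * then waiting on its bo with an effectively infinite timeout.
 */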
VkResult anv_DeviceWaitIdle(
VkDevice _device)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_state state;
struct anv_batch batch;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec2_objects[1];
struct anv_bo *bo = NULL;
VkResult result;
int64_t timeout;
int ret;
state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
bo = &device->dynamic_state_pool.block_pool->bo;
batch.next = state.map;
anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
anv_batch_emit(&batch, GEN8_MI_NOOP);
exec2_objects[0].handle = bo->gem_handle;
exec2_objects[0].relocation_count = 0;
exec2_objects[0].relocs_ptr = 0;
exec2_objects[0].alignment = 0;
exec2_objects[0].offset = bo->offset;
exec2_objects[0].flags = 0;
exec2_objects[0].rsvd1 = 0;
exec2_objects[0].rsvd2 = 0;
execbuf.buffers_ptr = (uintptr_t) exec2_objects;
execbuf.buffer_count = 1;
execbuf.batch_start_offset = state.offset;
execbuf.batch_len = batch.next - state.map;
execbuf.cliprects_ptr = 0;
execbuf.num_cliprects = 0;
execbuf.DR1 = 0;
execbuf.DR4 = 0;
execbuf.flags =
I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
execbuf.rsvd1 = device->context_id;
execbuf.rsvd2 = 0;
if (!device->no_hw) {
ret = anv_gem_execbuffer(device, &execbuf);
if (ret != 0) {
result = vk_error(VK_ERROR_UNKNOWN);
goto fail;
}
timeout = INT64_MAX;
ret = anv_gem_wait(device, bo->gem_handle, &timeout);
if (ret != 0) {
result = vk_error(VK_ERROR_UNKNOWN);
goto fail;
}
}
anv_state_pool_free(&device->dynamic_state_pool, state);
return VK_SUCCESS;
fail:
anv_state_pool_free(&device->dynamic_state_pool, state);
return result;
}
void *
anv_device_alloc(struct anv_device * device,
size_t size,
size_t alignment,
VkSystemAllocType allocType)
{
return device->instance->pfnAlloc(device->instance->pAllocUserData,
size,
alignment,
allocType);
}
void
anv_device_free(struct anv_device * device,
void * mem)
{
return device->instance->pfnFree(device->instance->pAllocUserData,
mem);
}
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
bo->gem_handle = anv_gem_create(device, size);
if (!bo->gem_handle)
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
bo->map = NULL;
bo->index = 0;
bo->offset = 0;
bo->size = size;
return VK_SUCCESS;
}
VkResult anv_AllocMemory(
VkDevice _device,
const VkMemoryAllocInfo* pAllocInfo,
VkDeviceMemory* pMem)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_device_memory *mem;
VkResult result;
assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
mem = anv_device_alloc(device, sizeof(*mem), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
if (result != VK_SUCCESS)
goto fail;
*pMem = (VkDeviceMemory) mem;
return VK_SUCCESS;
fail:
anv_device_free(device, mem);
return result;
}
VkResult anv_FreeMemory(
VkDevice _device,
VkDeviceMemory _mem)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
if (mem->bo.map)
anv_gem_munmap(mem->bo.map, mem->bo.size);
if (mem->bo.gem_handle != 0)
anv_gem_close(device, mem->bo.gem_handle);
anv_device_free(device, mem);
return VK_SUCCESS;
}
VkResult anv_SetMemoryPriority(
VkDevice device,
VkDeviceMemory mem,
VkMemoryPriority priority)
{
return VK_SUCCESS;
}
VkResult anv_MapMemory(
VkDevice _device,
VkDeviceMemory _mem,
VkDeviceSize offset,
VkDeviceSize size,
VkMemoryMapFlags flags,
void** ppData)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
/* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
* takes a VkDeviceMemory pointer, it seems like only one map of the memory
* at a time is valid. We could just mmap up front and return an offset
* pointer here, but that may exhaust virtual memory on 32 bit
* userspace. */
mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
mem->map_size = size;
*ppData = mem->map;
return VK_SUCCESS;
}
VkResult anv_UnmapMemory(
VkDevice _device,
VkDeviceMemory _mem)
{
struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
anv_gem_munmap(mem->map, mem->map_size);
return VK_SUCCESS;
}
VkResult anv_FlushMappedMemory(
VkDevice device,
VkDeviceMemory mem,
VkDeviceSize offset,
VkDeviceSize size)
{
/* clflush here for !llc platforms */
return VK_SUCCESS;
}
VkResult anv_PinSystemMemory(
VkDevice device,
const void* pSysMem,
size_t memSize,
VkDeviceMemory* pMem)
{
return VK_SUCCESS;
}
VkResult anv_GetMultiDeviceCompatibility(
VkPhysicalDevice physicalDevice0,
VkPhysicalDevice physicalDevice1,
VkPhysicalDeviceCompatibilityInfo* pInfo)
{
return VK_UNSUPPORTED;
}
VkResult anv_OpenSharedMemory(
VkDevice device,
const VkMemoryOpenInfo* pOpenInfo,
VkDeviceMemory* pMem)
{
return VK_UNSUPPORTED;
}
VkResult anv_OpenSharedSemaphore(
VkDevice device,
const VkSemaphoreOpenInfo* pOpenInfo,
VkSemaphore* pSemaphore)
{
return VK_UNSUPPORTED;
}
VkResult anv_OpenPeerMemory(
VkDevice device,
const VkPeerMemoryOpenInfo* pOpenInfo,
VkDeviceMemory* pMem)
{
return VK_UNSUPPORTED;
}
VkResult anv_OpenPeerImage(
VkDevice device,
const VkPeerImageOpenInfo* pOpenInfo,
VkImage* pImage,
VkDeviceMemory* pMem)
{
return VK_UNSUPPORTED;
}
static VkResult
anv_instance_destructor(struct anv_device * device,
VkObject object)
{
return vkDestroyInstance(object);
}
static VkResult
anv_noop_destructor(struct anv_device * device,
VkObject object)
{
return VK_SUCCESS;
}
static VkResult
anv_device_destructor(struct anv_device * device,
VkObject object)
{
return vkDestroyDevice(object);
}
static VkResult
anv_cmd_buffer_destructor(struct anv_device * device,
VkObject object)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;
anv_state_stream_finish(&cmd_buffer->surface_state_stream);
anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
anv_state_stream_finish(&cmd_buffer->binding_table_state_stream);
anv_batch_finish(&cmd_buffer->batch, device);
anv_device_free(device, cmd_buffer->exec2_objects);
anv_device_free(device, cmd_buffer->exec2_bos);
anv_device_free(device, cmd_buffer);
return VK_SUCCESS;
}
static VkResult
anv_pipeline_destructor(struct anv_device * device,
VkObject object)
{
struct anv_pipeline *pipeline = (struct anv_pipeline *) object;
return anv_pipeline_destroy(pipeline);
}
static VkResult
anv_free_destructor(struct anv_device * device,
VkObject object)
{
anv_device_free(device, (void *) object);
return VK_SUCCESS;
}
static VkResult
anv_fence_destructor(struct anv_device * device,
VkObject object)
{
struct anv_fence *fence = (struct anv_fence *) object;
anv_gem_munmap(fence->bo.map, fence->bo.size);
anv_gem_close(device, fence->bo.gem_handle);
anv_device_free(device, fence);
return VK_SUCCESS;
}
static VkResult
anv_query_pool_destructor(struct anv_device * device,
VkObject object)
{
struct anv_query_pool *pool = (struct anv_query_pool *) object;
anv_gem_munmap(pool->bo.map, pool->bo.size);
anv_gem_close(device, pool->bo.gem_handle);
anv_device_free(device, pool);
return VK_SUCCESS;
}
static VkResult (*anv_object_destructors[])(struct anv_device *device,
VkObject object) = {
[VK_OBJECT_TYPE_INSTANCE] = anv_instance_destructor,
[VK_OBJECT_TYPE_PHYSICAL_DEVICE] = anv_noop_destructor,
[VK_OBJECT_TYPE_DEVICE] = anv_device_destructor,
[VK_OBJECT_TYPE_QUEUE] = anv_noop_destructor,
[VK_OBJECT_TYPE_COMMAND_BUFFER] = anv_cmd_buffer_destructor,
[VK_OBJECT_TYPE_PIPELINE] = anv_pipeline_destructor,
[VK_OBJECT_TYPE_SHADER] = anv_free_destructor,
[VK_OBJECT_TYPE_BUFFER] = anv_free_destructor,
[VK_OBJECT_TYPE_IMAGE] = anv_free_destructor,
[VK_OBJECT_TYPE_RENDER_PASS] = anv_free_destructor,
[VK_OBJECT_TYPE_FENCE] = anv_fence_destructor,
[VK_OBJECT_TYPE_QUERY_POOL] = anv_query_pool_destructor
};
VkResult anv_DestroyObject(
VkDevice _device,
VkObjectType objType,
VkObject object)
{
struct anv_device *device = (struct anv_device *) _device;
assert(objType < ARRAY_SIZE(anv_object_destructors) &&
anv_object_destructors[objType] != NULL);
return anv_object_destructors[objType](device, object);
}
static void
fill_memory_requirements(
VkObjectType objType,
VkObject object,
VkMemoryRequirements * memory_requirements)
{
struct anv_buffer *buffer;
struct anv_image *image;
memory_requirements->memPropsAllowed =
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
/* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
VK_MEMORY_PROPERTY_SHAREABLE_BIT;
memory_requirements->memPropsRequired = 0;
switch (objType) {
case VK_OBJECT_TYPE_BUFFER:
buffer = (struct anv_buffer *) object;
memory_requirements->size = buffer->size;
memory_requirements->alignment = 16;
break;
case VK_OBJECT_TYPE_IMAGE:
image = (struct anv_image *) object;
memory_requirements->size = image->size;
memory_requirements->alignment = image->alignment;
break;
default:
memory_requirements->size = 0;
break;
}
}
static uint32_t
get_allocation_count(VkObjectType objType)
{
switch (objType) {
case VK_OBJECT_TYPE_BUFFER:
case VK_OBJECT_TYPE_IMAGE:
return 1;
default:
return 0;
}
}
VkResult anv_GetObjectInfo(
VkDevice _device,
VkObjectType objType,
VkObject object,
VkObjectInfoType infoType,
size_t* pDataSize,
void* pData)
{
VkMemoryRequirements memory_requirements;
uint32_t *count;
switch (infoType) {
case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
*pDataSize = sizeof(memory_requirements);
if (pData == NULL)
return VK_SUCCESS;
fill_memory_requirements(objType, object, pData);
return VK_SUCCESS;
case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
*pDataSize = sizeof(*count);
if (pData == NULL)
return VK_SUCCESS;
count = pData;
*count = get_allocation_count(objType);
return VK_SUCCESS;
default:
return VK_UNSUPPORTED;
}
}
VkResult anv_QueueBindObjectMemory(
VkQueue queue,
VkObjectType objType,
VkObject object,
uint32_t allocationIdx,
VkDeviceMemory _mem,
VkDeviceSize memOffset)
{
struct anv_buffer *buffer;
struct anv_image *image;
struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
switch (objType) {
case VK_OBJECT_TYPE_BUFFER:
buffer = (struct anv_buffer *) object;
buffer->bo = &mem->bo;
buffer->offset = memOffset;
break;
case VK_OBJECT_TYPE_IMAGE:
image = (struct anv_image *) object;
image->bo = &mem->bo;
image->offset = memOffset;
break;
default:
break;
}
return VK_SUCCESS;
}
VkResult anv_QueueBindObjectMemoryRange(
VkQueue queue,
VkObjectType objType,
VkObject object,
uint32_t allocationIdx,
VkDeviceSize rangeOffset,
VkDeviceSize rangeSize,
VkDeviceMemory mem,
VkDeviceSize memOffset)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_QueueBindImageMemoryRange(
VkQueue queue,
VkImage image,
uint32_t allocationIdx,
const VkImageMemoryBindInfo* pBindInfo,
VkDeviceMemory mem,
VkDeviceSize memOffset)
{
stub_return(VK_UNSUPPORTED);
}
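/* A fence is backed by a tiny dedicated batch bo containing just
 * MI_BATCH_BUFFER_END + MI_NOOP. anv_QueueSubmit() executes this batch right
 * after the command buffers it guards, so waiting on the fence reduces to
 * anv_gem_wait() on the fence bo (see anv_GetFenceStatus/anv_WaitForFences).
 */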
VkResult anv_CreateFence(
VkDevice _device,
const VkFenceCreateInfo* pCreateInfo,
VkFence* pFence)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_fence *fence;
struct anv_batch batch;
VkResult result;
const uint32_t fence_size = 128;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
fence = anv_device_alloc(device, sizeof(*fence), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (fence == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_init_new(&fence->bo, device, fence_size);
if (result != VK_SUCCESS)
goto fail;
fence->bo.map =
anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
batch.next = fence->bo.map;
anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
anv_batch_emit(&batch, GEN8_MI_NOOP);
fence->exec2_objects[0].handle = fence->bo.gem_handle;
fence->exec2_objects[0].relocation_count = 0;
fence->exec2_objects[0].relocs_ptr = 0;
fence->exec2_objects[0].alignment = 0;
fence->exec2_objects[0].offset = fence->bo.offset;
fence->exec2_objects[0].flags = 0;
fence->exec2_objects[0].rsvd1 = 0;
fence->exec2_objects[0].rsvd2 = 0;
fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
fence->execbuf.buffer_count = 1;
fence->execbuf.batch_start_offset = 0;
fence->execbuf.batch_len = batch.next - fence->bo.map;
fence->execbuf.cliprects_ptr = 0;
fence->execbuf.num_cliprects = 0;
fence->execbuf.DR1 = 0;
fence->execbuf.DR4 = 0;
fence->execbuf.flags =
I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
fence->execbuf.rsvd1 = device->context_id;
fence->execbuf.rsvd2 = 0;
*pFence = (VkFence) fence;
return VK_SUCCESS;
fail:
anv_device_free(device, fence);
return result;
}
VkResult anv_ResetFences(
VkDevice _device,
uint32_t fenceCount,
VkFence* pFences)
{
struct anv_fence **fences = (struct anv_fence **) pFences;
for (uint32_t i = 0; i < fenceCount; i++)
fences[i]->ready = false;
return VK_SUCCESS;
}
VkResult anv_GetFenceStatus(
VkDevice _device,
VkFence _fence)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_fence *fence = (struct anv_fence *) _fence;
int64_t t = 0;
int ret;
if (fence->ready)
return VK_SUCCESS;
ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
if (ret == 0) {
fence->ready = true;
return VK_SUCCESS;
}
return VK_NOT_READY;
}
VkResult anv_WaitForFences(
VkDevice _device,
uint32_t fenceCount,
const VkFence* pFences,
bool32_t waitAll,
uint64_t timeout)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_fence **fences = (struct anv_fence **) pFences;
int64_t t = timeout;
int ret;
/* FIXME: handle !waitAll */
for (uint32_t i = 0; i < fenceCount; i++) {
ret = anv_gem_wait(device, fences[i]->bo.gem_handle, &t);
if (ret == -1 && errno == ETIME)
return VK_TIMEOUT;
else if (ret == -1)
return vk_error(VK_ERROR_UNKNOWN);
}
return VK_SUCCESS;
}
// Queue semaphore functions
VkResult anv_CreateSemaphore(
VkDevice device,
const VkSemaphoreCreateInfo* pCreateInfo,
VkSemaphore* pSemaphore)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_QueueSignalSemaphore(
VkQueue queue,
VkSemaphore semaphore)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_QueueWaitSemaphore(
VkQueue queue,
VkSemaphore semaphore)
{
stub_return(VK_UNSUPPORTED);
}
// Event functions
VkResult anv_CreateEvent(
VkDevice device,
const VkEventCreateInfo* pCreateInfo,
VkEvent* pEvent)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_GetEventStatus(
VkDevice device,
VkEvent event)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_SetEvent(
VkDevice device,
VkEvent event)
{
stub_return(VK_UNSUPPORTED);
}
VkResult anv_ResetEvent(
VkDevice device,
VkEvent event)
{
stub_return(VK_UNSUPPORTED);
}
// Query functions
VkResult anv_CreateQueryPool(
VkDevice _device,
const VkQueryPoolCreateInfo* pCreateInfo,
VkQueryPool* pQueryPool)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_query_pool *pool;
VkResult result;
size_t size;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
switch (pCreateInfo->queryType) {
case VK_QUERY_TYPE_OCCLUSION:
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
return VK_UNSUPPORTED;
default:
unreachable("");
}
pool = anv_device_alloc(device, sizeof(*pool), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
pool->type = pCreateInfo->queryType;
size = pCreateInfo->slots * sizeof(struct anv_query_pool_slot);
result = anv_bo_init_new(&pool->bo, device, size);
if (result != VK_SUCCESS)
goto fail;
pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size);
*pQueryPool = (VkQueryPool) pool;
return VK_SUCCESS;
fail:
anv_device_free(device, pool);
return result;
}
VkResult anv_GetQueryPoolResults(
VkDevice _device,
VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount,
size_t* pDataSize,
void* pData,
VkQueryResultFlags flags)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
struct anv_query_pool_slot *slot = pool->bo.map;
int64_t timeout = INT64_MAX;
uint32_t *dst32 = pData;
uint64_t *dst64 = pData;
uint64_t result;
int ret;
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
/* Where is the availability info supposed to go? */
anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
return VK_UNSUPPORTED;
}
assert(pool->type == VK_QUERY_TYPE_OCCLUSION);
if (flags & VK_QUERY_RESULT_64_BIT)
*pDataSize = queryCount * sizeof(uint64_t);
else
*pDataSize = queryCount * sizeof(uint32_t);
if (pData == NULL)
return VK_SUCCESS;
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
if (ret == -1)
return vk_error(VK_ERROR_UNKNOWN);
}
for (uint32_t i = 0; i < queryCount; i++) {
result = slot[startQuery + i].end - slot[startQuery + i].begin;
if (flags & VK_QUERY_RESULT_64_BIT) {
*dst64++ = result;
} else {
if (result > UINT32_MAX)
result = UINT32_MAX;
*dst32++ = result;
}
}
return VK_SUCCESS;
}
// Buffer functions
VkResult anv_CreateBuffer(
VkDevice _device,
const VkBufferCreateInfo* pCreateInfo,
VkBuffer* pBuffer)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_buffer *buffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
buffer = anv_device_alloc(device, sizeof(*buffer), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->size = pCreateInfo->size;
buffer->bo = NULL;
buffer->offset = 0;
*pBuffer = (VkBuffer) buffer;
return VK_SUCCESS;
}
// Buffer view functions
VkResult anv_CreateBufferView(
VkDevice _device,
const VkBufferViewCreateInfo* pCreateInfo,
VkBufferView* pView)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_buffer *buffer = (struct anv_buffer *) pCreateInfo->buffer;
struct anv_surface_view *view;
const struct anv_format *format;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
view = anv_device_alloc(device, sizeof(*view), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (view == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
view->bo = buffer->bo;
view->offset = buffer->offset + pCreateInfo->offset;
view->surface_state =
anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
view->format = pCreateInfo->format;
format = anv_format_for_vk_format(pCreateInfo->format);
/* This assumes RGBA float format. */
uint32_t stride = 4;
uint32_t num_elements = pCreateInfo->range / stride;
struct GEN8_RENDER_SURFACE_STATE surface_state = {
.SurfaceType = SURFTYPE_BUFFER,
.SurfaceArray = false,
.SurfaceFormat = format->format,
.SurfaceVerticalAlignment = VALIGN4,
.SurfaceHorizontalAlignment = HALIGN4,
.TileMode = LINEAR,
.VerticalLineStride = 0,
.VerticalLineStrideOffset = 0,
.SamplerL2BypassModeDisable = true,
.RenderCacheReadWriteMode = WriteOnlyCache,
.MemoryObjectControlState = 0, /* FIXME: MOCS */
.BaseMipLevel = 0,
.SurfaceQPitch = 0,
.Height = (num_elements >> 7) & 0x3fff,
.Width = num_elements & 0x7f,
.Depth = (num_elements >> 21) & 0x3f,
.SurfacePitch = stride - 1,
.MinimumArrayElement = 0,
.NumberofMultisamples = MULTISAMPLECOUNT_1,
.XOffset = 0,
.YOffset = 0,
.SurfaceMinLOD = 0,
.MIPCountLOD = 0,
.AuxiliarySurfaceMode = AUX_NONE,
.RedClearColor = 0,
.GreenClearColor = 0,
.BlueClearColor = 0,
.AlphaClearColor = 0,
.ShaderChannelSelectRed = SCS_RED,
.ShaderChannelSelectGreen = SCS_GREEN,
.ShaderChannelSelectBlue = SCS_BLUE,
.ShaderChannelSelectAlpha = SCS_ALPHA,
.ResourceMinLOD = 0,
/* FIXME: We assume that the buffer must already be bound at this time. */
.SurfaceBaseAddress = { NULL, view->offset },
};
GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);
*pView = (VkBufferView) view;
return VK_SUCCESS;
}
// Sampler functions
VkResult anv_CreateSampler(
VkDevice _device,
const VkSamplerCreateInfo* pCreateInfo,
VkSampler* pSampler)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_sampler *sampler;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
sampler = anv_device_alloc(device, sizeof(*sampler), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!sampler)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
static const uint32_t vk_to_gen_tex_filter[] = {
[VK_TEX_FILTER_NEAREST] = MAPFILTER_NEAREST,
[VK_TEX_FILTER_LINEAR] = MAPFILTER_LINEAR
};
static const uint32_t vk_to_gen_mipmap_mode[] = {
[VK_TEX_MIPMAP_MODE_BASE] = MIPFILTER_NONE,
[VK_TEX_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
[VK_TEX_MIPMAP_MODE_LINEAR] = MIPFILTER_LINEAR
};
static const uint32_t vk_to_gen_tex_address[] = {
[VK_TEX_ADDRESS_WRAP] = TCM_WRAP,
[VK_TEX_ADDRESS_MIRROR] = TCM_MIRROR,
[VK_TEX_ADDRESS_CLAMP] = TCM_CLAMP,
[VK_TEX_ADDRESS_MIRROR_ONCE] = TCM_MIRROR_ONCE,
[VK_TEX_ADDRESS_CLAMP_BORDER] = TCM_CLAMP_BORDER,
};
static const uint32_t vk_to_gen_compare_op[] = {
[VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
[VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
[VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
[VK_COMPARE_OP_LESS_EQUAL] = PREFILTEROPLEQUAL,
[VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
[VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
[VK_COMPARE_OP_GREATER_EQUAL] = PREFILTEROPGEQUAL,
[VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
};
if (pCreateInfo->maxAnisotropy > 0)
anv_finishme("missing support for anisotropic filtering");
struct GEN8_SAMPLER_STATE sampler_state = {
.SamplerDisable = false,
.TextureBorderColorMode = DX10OGL,
.LODPreClampMode = 0,
.BaseMipLevel = 0,
.MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode],
.MagModeFilter = vk_to_gen_tex_filter[pCreateInfo->magFilter],
.MinModeFilter = vk_to_gen_tex_filter[pCreateInfo->minFilter],
.TextureLODBias = pCreateInfo->mipLodBias * 256,
.AnisotropicAlgorithm = EWAApproximation,
.MinLOD = pCreateInfo->minLod * 256,
.MaxLOD = pCreateInfo->maxLod * 256,
.ChromaKeyEnable = 0,
.ChromaKeyIndex = 0,
.ChromaKeyMode = 0,
.ShadowFunction = vk_to_gen_compare_op[pCreateInfo->compareOp],
.CubeSurfaceControlMode = 0,
.IndirectStatePointer = 0,
.LODClampMagnificationMode = MIPNONE,
.MaximumAnisotropy = 0,
.RAddressMinFilterRoundingEnable = 0,
.RAddressMagFilterRoundingEnable = 0,
.VAddressMinFilterRoundingEnable = 0,
.VAddressMagFilterRoundingEnable = 0,
.UAddressMinFilterRoundingEnable = 0,
.UAddressMagFilterRoundingEnable = 0,
.TrilinearFilterQuality = 0,
.NonnormalizedCoordinateEnable = 0,
.TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressU],
.TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressV],
.TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressW],
};
GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);
*pSampler = (VkSampler) sampler;
return VK_SUCCESS;
}
// Descriptor set functions
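/* Descriptor set layouts are built in two passes: first count the sampler and
 * surface slots required per shader stage (plus the number of dynamic
 * buffers), then carve per-stage index arrays out of one allocation and fill
 * them with the descriptor indices each binding maps to.
 */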
VkResult anv_CreateDescriptorSetLayout(
VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
VkDescriptorSetLayout* pSetLayout)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_descriptor_set_layout *set_layout;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
uint32_t sampler_count[VK_NUM_SHADER_STAGE] = { 0, };
uint32_t surface_count[VK_NUM_SHADER_STAGE] = { 0, };
uint32_t num_dynamic_buffers = 0;
uint32_t count = 0;
uint32_t s;
for (uint32_t i = 0; i < pCreateInfo->count; i++) {
switch (pCreateInfo->pBinding[i].descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
sampler_count[s] += pCreateInfo->pBinding[i].count;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
sampler_count[s] += pCreateInfo->pBinding[i].count;
/* fall through */
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
surface_count[s] += pCreateInfo->pBinding[i].count;
break;
default:
break;
}
count += pCreateInfo->pBinding[i].count;
}
for (uint32_t i = 0; i < pCreateInfo->count; i++) {
switch (pCreateInfo->pBinding[i].descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
num_dynamic_buffers++;
break;
default:
break;
}
}
uint32_t sampler_total = 0;
uint32_t surface_total = 0;
for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
sampler_total += sampler_count[s];
surface_total += surface_count[s];
}
size_t size = sizeof(*set_layout) +
(sampler_total + surface_total) * sizeof(uint32_t);
set_layout = anv_device_alloc(device, size, 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!set_layout)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
set_layout->num_dynamic_buffers = num_dynamic_buffers;
set_layout->count = count;
uint32_t *p = set_layout->entries;
uint32_t *sampler[VK_NUM_SHADER_STAGE];
uint32_t *surface[VK_NUM_SHADER_STAGE];
for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
set_layout->stage[s].surface_count = surface_count[s];
set_layout->stage[s].surface_start = surface[s] = p;
p += surface_count[s];
set_layout->stage[s].sampler_count = sampler_count[s];
set_layout->stage[s].sampler_start = sampler[s] = p;
p += sampler_count[s];
}
uint32_t descriptor = 0;
for (uint32_t i = 0; i < pCreateInfo->count; i++) {
switch (pCreateInfo->pBinding[i].descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
*(sampler[s])++ = descriptor + j;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
*(sampler[s])++ = descriptor + j;
/* fallthrough */
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
*(surface[s])++ = descriptor + j;
}
break;
default:
unreachable("");
}
descriptor += pCreateInfo->pBinding[i].count;
}
*pSetLayout = (VkDescriptorSetLayout) set_layout;
return VK_SUCCESS;
}
VkResult anv_BeginDescriptorPoolUpdate(
VkDevice device,
VkDescriptorUpdateMode updateMode)
{
return VK_SUCCESS;
}
VkResult anv_EndDescriptorPoolUpdate(
VkDevice device,
VkCmdBuffer cmd)
{
return VK_SUCCESS;
}
VkResult anv_CreateDescriptorPool(
VkDevice device,
VkDescriptorPoolUsage poolUsage,
uint32_t maxSets,
const VkDescriptorPoolCreateInfo* pCreateInfo,
VkDescriptorPool* pDescriptorPool)
{
*pDescriptorPool = 1;
return VK_SUCCESS;
}
VkResult anv_ResetDescriptorPool(
VkDevice device,
VkDescriptorPool descriptorPool)
{
return VK_SUCCESS;
}
VkResult anv_AllocDescriptorSets(
VkDevice _device,
VkDescriptorPool descriptorPool,
VkDescriptorSetUsage setUsage,
uint32_t count,
const VkDescriptorSetLayout* pSetLayouts,
VkDescriptorSet* pDescriptorSets,
uint32_t* pCount)
{
struct anv_device *device = (struct anv_device *) _device;
const struct anv_descriptor_set_layout *layout;
struct anv_descriptor_set *set;
size_t size;
for (uint32_t i = 0; i < count; i++) {
layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
set = anv_device_alloc(device, size, 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!set) {
*pCount = i;
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
pDescriptorSets[i] = (VkDescriptorSet) set;
}
*pCount = count;
return VK_SUCCESS;
}
void anv_ClearDescriptorSets(
VkDevice device,
VkDescriptorPool descriptorPool,
uint32_t count,
const VkDescriptorSet* pDescriptorSets)
{
}
void anv_UpdateDescriptors(
VkDevice _device,
VkDescriptorSet descriptorSet,
uint32_t updateCount,
const void** ppUpdateArray)
{
struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
VkUpdateSamplers *update_samplers;
VkUpdateSamplerTextures *update_sampler_textures;
VkUpdateImages *update_images;
VkUpdateBuffers *update_buffers;
VkUpdateAsCopy *update_as_copy;
for (uint32_t i = 0; i < updateCount; i++) {
const struct anv_common *common = ppUpdateArray[i];
switch (common->sType) {
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
update_samplers = (VkUpdateSamplers *) common;
for (uint32_t j = 0; j < update_samplers->count; j++) {
set->descriptors[update_samplers->binding + j].sampler =
(struct anv_sampler *) update_samplers->pSamplers[j];
}
break;
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
/* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
update_sampler_textures = (VkUpdateSamplerTextures *) common;
for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
set->descriptors[update_sampler_textures->binding + j].view =
(struct anv_surface_view *)
update_sampler_textures->pSamplerImageViews[j].pImageView->view;
set->descriptors[update_sampler_textures->binding + j].sampler =
(struct anv_sampler *)
update_sampler_textures->pSamplerImageViews[j].sampler;
}
break;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
update_images = (VkUpdateImages *) common;
for (uint32_t j = 0; j < update_images->count; j++) {
set->descriptors[update_images->binding + j].view =
(struct anv_surface_view *) update_images->pImageViews[j].view;
}
break;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
update_buffers = (VkUpdateBuffers *) common;
for (uint32_t j = 0; j < update_buffers->count; j++) {
set->descriptors[update_buffers->binding + j].view =
(struct anv_surface_view *) update_buffers->pBufferViews[j].view;
}
/* FIXME: descriptor arrays? */
break;
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
update_as_copy = (VkUpdateAsCopy *) common;
(void) update_as_copy;
break;
default:
break;
}
}
}
// State object functions
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
if (x < min)
return min;
else if (x < max)
return x;
else
return max;
}
VkResult anv_CreateDynamicViewportState(
VkDevice _device,
const VkDynamicVpStateCreateInfo* pCreateInfo,
VkDynamicVpState* pState)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_dynamic_vp_state *state;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);
state = anv_device_alloc(device, sizeof(*state), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (state == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
unsigned count = pCreateInfo->viewportAndScissorCount;
state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
count * 64, 64);
state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
count * 8, 32);
state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
count * 32, 32);
for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
const VkViewport *vp = &pCreateInfo->pViewports[i];
const VkRect *s = &pCreateInfo->pScissors[i];
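/* The SF_CLIP viewport matrix encodes the usual viewport transform:
 * m00/m11/m22 hold the x/y/depth scale factors (half the viewport extent)
 * and m30/m31/m32 the translations to the viewport center.
 */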
struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
.ViewportMatrixElementm00 = vp->width / 2,
.ViewportMatrixElementm11 = vp->height / 2,
.ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
.ViewportMatrixElementm30 = vp->originX + vp->width / 2,
.ViewportMatrixElementm31 = vp->originY + vp->height / 2,
.ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
.XMinClipGuardband = -1.0f,
.XMaxClipGuardband = 1.0f,
.YMinClipGuardband = -1.0f,
.YMaxClipGuardband = 1.0f,
.XMinViewPort = vp->originX,
.XMaxViewPort = vp->originX + vp->width - 1,
.YMinViewPort = vp->originY,
.YMaxViewPort = vp->originY + vp->height - 1,
};
struct GEN8_CC_VIEWPORT cc_viewport = {
.MinimumDepth = vp->minDepth,
.MaximumDepth = vp->maxDepth
};
/* Since xmax and ymax are inclusive, we have to have xmax < xmin or
 * ymax < ymin for empty clips. In case clip x, y, width and height are
 * all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which
 * isn't what we want. Just special case empty clips and produce a
 * canonical empty clip. */
static const struct GEN8_SCISSOR_RECT empty_scissor = {
.ScissorRectangleYMin = 1,
.ScissorRectangleXMin = 1,
.ScissorRectangleYMax = 0,
.ScissorRectangleXMax = 0
};
const int max = 0xffff;
struct GEN8_SCISSOR_RECT scissor = {
/* Do this math using int64_t so overflow gets clamped correctly. */
.ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
.ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
.ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
.ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
};
GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);
if (s->extent.width <= 0 || s->extent.height <= 0) {
GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
} else {
GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
}
}
*pState = (VkDynamicVpState) state;
return VK_SUCCESS;
}
VkResult anv_CreateDynamicRasterState(
VkDevice _device,
const VkDynamicRsStateCreateInfo* pCreateInfo,
VkDynamicRsState* pState)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_dynamic_rs_state *state;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);
state = anv_device_alloc(device, sizeof(*state), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (state == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* Missing these:
* float depthBias;
* float depthBiasClamp;
* float slopeScaledDepthBias;
* float pointFadeThreshold;
* // optional (GL45) - Size of point fade threshold
*/
struct GEN8_3DSTATE_SF sf = {
GEN8_3DSTATE_SF_header,
.LineWidth = pCreateInfo->lineWidth,
.PointWidth = pCreateInfo->pointSize,
};
GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);
*pState = (VkDynamicRsState) state;
return VK_SUCCESS;
}
VkResult anv_CreateDynamicColorBlendState(
VkDevice _device,
const VkDynamicCbStateCreateInfo* pCreateInfo,
VkDynamicCbState* pState)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_dynamic_cb_state *state;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);
state = anv_device_alloc(device, sizeof(*state), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (state == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
*pState = (VkDynamicCbState) state;
return VK_SUCCESS;
}
VkResult anv_CreateDynamicDepthStencilState(
VkDevice device,
const VkDynamicDsStateCreateInfo* pCreateInfo,
VkDynamicDsState* pState)
{
stub_return(VK_UNSUPPORTED);
}
// Command buffer functions
VkResult anv_CreateCommandBuffer(
VkDevice _device,
const VkCmdBufferCreateInfo* pCreateInfo,
VkCmdBuffer* pCmdBuffer)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_cmd_buffer *cmd_buffer;
VkResult result;
cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer->device = device;
cmd_buffer->rs_state = NULL;
cmd_buffer->vp_state = NULL;
memset(&cmd_buffer->default_bindings, 0, sizeof(cmd_buffer->default_bindings));
cmd_buffer->bindings = &cmd_buffer->default_bindings;
result = anv_batch_init(&cmd_buffer->batch, device);
if (result != VK_SUCCESS)
goto fail;
cmd_buffer->exec2_objects =
anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (cmd_buffer->exec2_objects == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_batch;
}
cmd_buffer->exec2_bos =
anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (cmd_buffer->exec2_bos == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_exec2_objects;
}
anv_state_stream_init(&cmd_buffer->binding_table_state_stream,
&device->binding_table_block_pool);
anv_state_stream_init(&cmd_buffer->surface_state_stream,
&device->surface_state_block_pool);
anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
&device->dynamic_state_block_pool);
cmd_buffer->dirty = 0;
cmd_buffer->vb_dirty = 0;
*pCmdBuffer = (VkCmdBuffer) cmd_buffer;
return VK_SUCCESS;
fail_exec2_objects:
anv_device_free(device, cmd_buffer->exec2_objects);
fail_batch:
anv_batch_finish(&cmd_buffer->batch, device);
fail:
anv_device_free(device, cmd_buffer);
return result;
}
VkResult anv_BeginCommandBuffer(
VkCmdBuffer cmdBuffer,
const VkCmdBufferBeginInfo* pBeginInfo)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_device *device = cmd_buffer->device;
anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
.PipelineSelection = _3D);
anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);
anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
.GeneralStateBaseAddress = { NULL, 0 },
.GeneralStateBaseAddressModifyEnable = true,
.GeneralStateBufferSize = 0xfffff,
.GeneralStateBufferSizeModifyEnable = true,
.SurfaceStateBaseAddress = { &device->surface_state_block_pool.bo, 0 },
.SurfaceStateMemoryObjectControlState = 0, /* FIXME: MOCS */
.SurfaceStateBaseAddressModifyEnable = true,
.DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
.DynamicStateBaseAddressModifyEnable = true,
.DynamicStateBufferSize = 0xfffff,
.DynamicStateBufferSizeModifyEnable = true,
.IndirectObjectBaseAddress = { NULL, 0 },
.IndirectObjectBaseAddressModifyEnable = true,
.IndirectObjectBufferSize = 0xfffff,
.IndirectObjectBufferSizeModifyEnable = true,
.InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
.InstructionBaseAddressModifyEnable = true,
.InstructionBufferSize = 0xfffff,
.InstructionBuffersizeModifyEnable = true);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
.StatisticsEnable = true);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
.ConstantBufferOffset = 0,
.ConstantBufferSize = 4);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
.ConstantBufferOffset = 4,
.ConstantBufferSize = 4);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
.ConstantBufferOffset = 8,
.ConstantBufferSize = 4);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
.ChromaKeyKillEnable = false);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);
/* Hardcoded state: */
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
.SurfaceType = SURFTYPE_2D,
.Width = 1,
.Height = 1,
.SurfaceFormat = D16_UNORM,
.SurfaceBaseAddress = { NULL, 0 },
.HierarchicalDepthBufferEnable = 0);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_DEPTH_STENCIL,
.DepthTestEnable = false,
.DepthBufferWriteEnable = false);
return VK_SUCCESS;
}
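/* Add a bo to the command buffer's exec2 object array, optionally attaching a
 * relocation list. The array index is recorded in bo->index so relocation
 * entries can later refer to the bo by index (I915_EXEC_HANDLE_LUT).
 */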
static void
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
struct anv_bo *bo, struct anv_reloc_list *list)
{
struct drm_i915_gem_exec_object2 *obj;
bo->index = cmd_buffer->bo_count;
obj = &cmd_buffer->exec2_objects[bo->index];
cmd_buffer->exec2_bos[bo->index] = bo;
cmd_buffer->bo_count++;
obj->handle = bo->gem_handle;
obj->relocation_count = 0;
obj->relocs_ptr = 0;
obj->alignment = 0;
obj->offset = bo->offset;
obj->flags = 0;
obj->rsvd1 = 0;
obj->rsvd2 = 0;
if (list) {
obj->relocation_count = list->num_relocs;
obj->relocs_ptr = (uintptr_t) list->relocs;
}
}
static void
anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
struct anv_reloc_list *list)
{
struct anv_bo *bo, *batch_bo;
batch_bo = &cmd_buffer->batch.bo;
for (size_t i = 0; i < list->num_relocs; i++) {
bo = list->reloc_bos[i];
/* Skip any relocations targeting the batch bo. We need to make sure
* it's the last in the list so we'll add it manually later.
*/
if (bo == batch_bo)
continue;
if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
continue;
anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
}
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
struct anv_reloc_list *list)
{
struct anv_bo *bo;
/* If the kernel supports I915_EXEC_NO_RELOC, it will compare the offset in
 * struct drm_i915_gem_exec_object2 against each bo's current offset and, if
 * none of the bos have moved, it will skip relocation processing altogether.
 * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
 * value of offset, so we can set it either way. For that to work we need to
 * make sure all relocs use the same presumed offset.
 */
for (size_t i = 0; i < list->num_relocs; i++) {
bo = list->reloc_bos[i];
if (bo->offset != list->relocs[i].presumed_offset)
cmd_buffer->need_reloc = true;
list->relocs[i].target_handle = bo->index;
}
}
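/* Close out the batch and assemble the execbuffer: the surface state block
 * pool bo is added first together with its relocations, then every bo
 * referenced from either reloc list, and finally the batch bo itself.
 * Relocation target handles are rewritten to list indices for
 * I915_EXEC_HANDLE_LUT, and I915_EXEC_NO_RELOC is requested when no presumed
 * offset changed.
 */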
VkResult anv_EndCommandBuffer(
VkCmdBuffer cmdBuffer)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_device *device = cmd_buffer->device;
struct anv_batch *batch = &cmd_buffer->batch;
anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);
/* Round batch up to an even number of dwords. */
if ((batch->next - batch->bo.map) & 4)
anv_batch_emit(batch, GEN8_MI_NOOP);
cmd_buffer->bo_count = 0;
cmd_buffer->need_reloc = false;
/* Lock for access to bo->index. */
pthread_mutex_lock(&device->mutex);
/* Add block pool bos first so we can add them with their relocs. */
anv_cmd_buffer_add_bo(cmd_buffer, &device->surface_state_block_pool.bo,
&batch->surf_relocs);
anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->surf_relocs);
anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
anv_cmd_buffer_process_relocs(cmd_buffer, &batch->surf_relocs);
anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);
cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
cmd_buffer->execbuf.batch_start_offset = 0;
cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
cmd_buffer->execbuf.cliprects_ptr = 0;
cmd_buffer->execbuf.num_cliprects = 0;
cmd_buffer->execbuf.DR1 = 0;
cmd_buffer->execbuf.DR4 = 0;
cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
if (!cmd_buffer->need_reloc)
cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
cmd_buffer->execbuf.rsvd1 = device->context_id;
cmd_buffer->execbuf.rsvd2 = 0;
pthread_mutex_unlock(&device->mutex);
return VK_SUCCESS;
}
VkResult anv_ResetCommandBuffer(
VkCmdBuffer cmdBuffer)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
anv_batch_reset(&cmd_buffer->batch);
return VK_SUCCESS;
}
// Command buffer building functions
void anv_CmdBindPipeline(
VkCmdBuffer cmdBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
cmd_buffer->pipeline = (struct anv_pipeline *) _pipeline;
cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
}
void anv_CmdBindDynamicStateObject(
VkCmdBuffer cmdBuffer,
VkStateBindPoint stateBindPoint,
VkDynamicStateObject dynamicState)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_dynamic_vp_state *vp_state;
switch (stateBindPoint) {
case VK_STATE_BIND_POINT_VIEWPORT:
vp_state = (struct anv_dynamic_vp_state *) dynamicState;
/* We emit state immediately, but set cmd_buffer->vp_state to indicate
* that vp state has been set in this command buffer. */
cmd_buffer->vp_state = vp_state;
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
.ScissorRectPointer = vp_state->scissor.offset);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
.CCViewportPointer = vp_state->cc_vp.offset);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
.SFClipViewportPointer = vp_state->sf_clip_vp.offset);
break;
case VK_STATE_BIND_POINT_RASTER:
cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
break;
case VK_STATE_BIND_POINT_COLOR_BLEND:
case VK_STATE_BIND_POINT_DEPTH_STENCIL:
break;
default:
break;
}
}
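/* Resolve the bound descriptor sets into flat per-stage surface and sampler
* arrays. The set layout's surface_start/sampler_start tables map each
* per-stage slot back to a descriptor in the set, and fragment surfaces are
* biased by MAX_RTS because the first binding table entries of that stage
* are reserved for render targets (see anv_cmd_buffer_fill_render_targets).
* Note that pDynamicOffsets is not consumed yet; the running dynamic-buffer
* offset is computed but not applied.
*/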
void anv_CmdBindDescriptorSets(
VkCmdBuffer cmdBuffer,
VkPipelineBindPoint pipelineBindPoint,
uint32_t firstSet,
uint32_t setCount,
const VkDescriptorSet* pDescriptorSets,
uint32_t dynamicOffsetCount,
const uint32_t* pDynamicOffsets)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
struct anv_bindings *bindings = cmd_buffer->bindings;
uint32_t offset = 0;
for (uint32_t i = 0; i < setCount; i++) {
struct anv_descriptor_set *set =
(struct anv_descriptor_set *) pDescriptorSets[i];
struct anv_descriptor_set_layout *set_layout = layout->set[firstSet + i].layout;
for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
uint32_t *surface_to_desc = set_layout->stage[s].surface_start;
uint32_t *sampler_to_desc = set_layout->stage[s].sampler_start;
uint32_t bias = s == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
uint32_t start;
start = bias + layout->set[firstSet + i].surface_start[s];
for (uint32_t b = 0; b < set_layout->stage[s].surface_count; b++) {
struct anv_surface_view *view = set->descriptors[surface_to_desc[b]].view;
bindings->descriptors[s].surfaces[start + b] =
view->surface_state.offset;
bindings->descriptors[s].relocs[start + b].bo = view->bo;
bindings->descriptors[s].relocs[start + b].offset = view->offset;
}
start = layout->set[firstSet + i].sampler_start[s];
for (uint32_t b = 0; b < set_layout->stage[s].sampler_count; b++) {
struct anv_sampler *sampler = set->descriptors[sampler_to_desc[b]].sampler;
memcpy(&bindings->descriptors[s].samplers[start + b],
sampler->state, sizeof(sampler->state));
}
}
offset += layout->set[firstSet + i].layout->num_dynamic_buffers;
}
cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
void anv_CmdBindIndexBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
VkIndexType indexType)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
static const uint32_t vk_to_gen_index_type[] = {
[VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
[VK_INDEX_TYPE_UINT16] = INDEX_WORD,
[VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
};
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
.IndexFormat = vk_to_gen_index_type[indexType],
.MemoryObjectControlState = 0,
.BufferStartingAddress = { buffer->bo, buffer->offset + offset },
.BufferSize = buffer->size - offset);
}
void anv_CmdBindVertexBuffers(
VkCmdBuffer cmdBuffer,
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
const VkDeviceSize* pOffsets)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_bindings *bindings = cmd_buffer->bindings;
/* We have to defer setting up vertex buffers since we need the buffer
* stride from the pipeline. */
for (uint32_t i = 0; i < bindingCount; i++) {
bindings->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
bindings->vb[startBinding + i].offset = pOffsets[i];
cmd_buffer->vb_dirty |= 1 << (startBinding + i);
}
}
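/* Upload per-stage binding tables and sampler state and point the hardware
* at them. A stage's binding table is the render-target entries (fragment
* only, biased by MAX_RTS) followed by the descriptor-set surfaces; each
* entry also gets a relocation at dword 8 of its surface state, where the
* surface base address lives on gen8. The GEN8_3DSTATE_*_POINTERS_VS
* templates are reused for every stage by overriding _3DCommandSubOpcode
* with the per-stage value from the opcode tables below.
*/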
static void
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
struct anv_bindings *bindings = cmd_buffer->bindings;
uint32_t layers = cmd_buffer->framebuffer->layers;
for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
uint32_t bias;
if (s == VK_SHADER_STAGE_FRAGMENT) {
bias = MAX_RTS;
layers = cmd_buffer->framebuffer->layers;
} else {
bias = 0;
layers = 0;
}
/* This is a little awkward: layout can be NULL but we still have to
* allocate and set a binding table for the PS stage for render
* targets. */
uint32_t surface_count = layout ? layout->stage[s].surface_count : 0;
if (layers + surface_count > 0) {
struct anv_state state;
uint32_t size;
size = (bias + surface_count) * sizeof(uint32_t);
state = anv_state_stream_alloc(&cmd_buffer->binding_table_state_stream,
size, 32);
memcpy(state.map, bindings->descriptors[s].surfaces, size);
for (uint32_t i = 0; i < layers; i++)
anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
bindings->descriptors[s].surfaces[i] + 8 * sizeof(int32_t),
bindings->descriptors[s].relocs[i].bo,
bindings->descriptors[s].relocs[i].offset);
for (uint32_t i = 0; i < surface_count; i++)
anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
bindings->descriptors[s].surfaces[bias + i] + 8 * sizeof(int32_t),
bindings->descriptors[s].relocs[bias + i].bo,
bindings->descriptors[s].relocs[bias + i].offset);
static const uint32_t binding_table_opcodes[] = {
[VK_SHADER_STAGE_VERTEX] = 38,
[VK_SHADER_STAGE_TESS_CONTROL] = 39,
[VK_SHADER_STAGE_TESS_EVALUATION] = 40,
[VK_SHADER_STAGE_GEOMETRY] = 41,
[VK_SHADER_STAGE_FRAGMENT] = 42,
[VK_SHADER_STAGE_COMPUTE] = 0,
};
anv_batch_emit(&cmd_buffer->batch,
GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
._3DCommandSubOpcode = binding_table_opcodes[s],
.PointertoVSBindingTable = state.offset);
}
if (layout && layout->stage[s].sampler_count > 0) {
struct anv_state state;
size_t size;
size = layout->stage[s].sampler_count * 16;
state = anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream, size, 32);
memcpy(state.map, bindings->descriptors[s].samplers, size);
static const uint32_t sampler_state_opcodes[] = {
[VK_SHADER_STAGE_VERTEX] = 43,
[VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
[VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
[VK_SHADER_STAGE_GEOMETRY] = 46,
[VK_SHADER_STAGE_FRAGMENT] = 47,
[VK_SHADER_STAGE_COMPUTE] = 0,
};
anv_batch_emit(&cmd_buffer->batch,
GEN8_3DSTATE_SAMPLER_STATE_POINTERS_VS,
._3DCommandSubOpcode = sampler_state_opcodes[s],
.PointertoVSSamplerState = state.offset);
}
}
}
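/* Emit all state that was deferred until draw time: dirty vertex buffers
* (3DSTATE_VERTEX_BUFFERS, four dwords per buffer, using the binding stride
* from the pipeline), the pipeline's pre-built batch, the binding and
* sampler tables, and the SF state produced by merging the dynamic raster
* state's dwords with the pipeline's.
*/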
static void
anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_pipeline *pipeline = cmd_buffer->pipeline;
struct anv_bindings *bindings = cmd_buffer->bindings;
const uint32_t num_buffers = __builtin_popcount(cmd_buffer->vb_dirty);
const uint32_t num_dwords = 1 + num_buffers * 4;
uint32_t *p;
if (cmd_buffer->vb_dirty) {
p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
GEN8_3DSTATE_VERTEX_BUFFERS);
uint32_t vb, i = 0;
for_each_bit(vb, cmd_buffer->vb_dirty) {
struct anv_buffer *buffer = bindings->vb[vb].buffer;
uint32_t offset = bindings->vb[vb].offset;
struct GEN8_VERTEX_BUFFER_STATE state = {
.VertexBufferIndex = vb,
.MemoryObjectControlState = 0,
.AddressModifyEnable = true,
.BufferPitch = pipeline->binding_stride[vb],
.BufferStartingAddress = { buffer->bo, buffer->offset + offset },
.BufferSize = buffer->size - offset
};
GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
i++;
}
}
if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
flush_descriptor_sets(cmd_buffer);
if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY))
anv_batch_emit_merge(&cmd_buffer->batch,
cmd_buffer->rs_state->state_sf, pipeline->state_sf);
cmd_buffer->vb_dirty = 0;
cmd_buffer->dirty = 0;
}
void anv_CmdDraw(
VkCmdBuffer cmdBuffer,
uint32_t firstVertex,
uint32_t vertexCount,
uint32_t firstInstance,
uint32_t instanceCount)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
anv_cmd_buffer_flush_state(cmd_buffer);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
.VertexAccessType = SEQUENTIAL,
.VertexCountPerInstance = vertexCount,
.StartVertexLocation = firstVertex,
.InstanceCount = instanceCount,
.StartInstanceLocation = firstInstance,
.BaseVertexLocation = 0);
}
void anv_CmdDrawIndexed(
VkCmdBuffer cmdBuffer,
uint32_t firstIndex,
uint32_t indexCount,
int32_t vertexOffset,
uint32_t firstInstance,
uint32_t instanceCount)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
anv_cmd_buffer_flush_state(cmd_buffer);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
.VertexAccessType = RANDOM,
.VertexCountPerInstance = indexCount,
.StartVertexLocation = firstIndex,
.InstanceCount = instanceCount,
.StartInstanceLocation = firstInstance,
.BaseVertexLocation = 0);
}
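/* Small helpers for loading a register from memory (MI_LOAD_REGISTER_MEM)
* or from an immediate (MI_LOAD_REGISTER_IMM).
*/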
static void
anv_batch_lrm(struct anv_batch *batch,
uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
.RegisterAddress = reg,
.MemoryAddress = { bo, offset });
}
static void
anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
.RegisterOffset = reg,
.DataDWord = imm);
}
/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET 0x2420
#define GEN7_3DPRIM_START_VERTEX 0x2430
#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
#define GEN7_3DPRIM_START_INSTANCE 0x243C
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
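/* The indirect draw paths below load the draw parameters straight from the
* indirect buffer into the 3DPRIM registers above and then emit a
* 3DPRIMITIVE with IndirectParameterEnable set; the dword order read from
* the buffer matches the layout of the draw-indirect structures. Note that
* count and stride are not consumed yet: only a single indirect draw is
* emitted.
*/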
void anv_CmdDrawIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
anv_cmd_buffer_flush_state(cmd_buffer);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
.IndirectParameterEnable = true,
.VertexAccessType = SEQUENTIAL);
}
void anv_CmdDrawIndexedIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer _buffer,
VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
anv_cmd_buffer_flush_state(cmd_buffer);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
.IndirectParameterEnable = true,
.VertexAccessType = RANDOM);
}
void anv_CmdDispatch(
VkCmdBuffer cmdBuffer,
uint32_t x,
uint32_t y,
uint32_t z)
{
stub();
}
void anv_CmdDispatchIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
VkDeviceSize offset)
{
stub();
}
void anv_CmdSetEvent(
VkCmdBuffer cmdBuffer,
VkEvent event,
VkPipeEvent pipeEvent)
{
stub();
}
void anv_CmdResetEvent(
VkCmdBuffer cmdBuffer,
VkEvent event,
VkPipeEvent pipeEvent)
{
stub();
}
void anv_CmdWaitEvents(
VkCmdBuffer cmdBuffer,
VkWaitEvent waitEvent,
uint32_t eventCount,
const VkEvent* pEvents,
uint32_t memBarrierCount,
const void** ppMemBarriers)
{
stub();
}
void anv_CmdPipelineBarrier(
VkCmdBuffer cmdBuffer,
VkWaitEvent waitEvent,
uint32_t pipeEventCount,
const VkPipeEvent* pPipeEvents,
uint32_t memBarrierCount,
const void** ppMemBarriers)
{
stub();
}
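/* Occlusion queries are implemented by snapshotting PS_DEPTH_COUNT into the
* query pool bo with a PIPE_CONTROL post-sync write at query begin and end;
* the query result is the difference between the two snapshots.
*/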
static void
anv_batch_emit_ps_depth_count(struct anv_batch *batch,
struct anv_bo *bo, uint32_t offset)
{
anv_batch_emit(batch, GEN8_PIPE_CONTROL,
.DestinationAddressType = DAT_PPGTT,
.PostSyncOperation = WritePSDepthCount,
.Address = { bo, offset }); /* FIXME: This is only lower 32 bits */
}
void anv_CmdBeginQuery(
VkCmdBuffer cmdBuffer,
VkQueryPool queryPool,
uint32_t slot,
VkQueryControlFlags flags)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
slot * sizeof(struct anv_query_pool_slot));
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
default:
unreachable("");
}
}
void anv_CmdEndQuery(
VkCmdBuffer cmdBuffer,
VkQueryPool queryPool,
uint32_t slot)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
slot * sizeof(struct anv_query_pool_slot) + 8);
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
default:
unreachable("");
}
}
void anv_CmdResetQueryPool(
VkCmdBuffer cmdBuffer,
VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount)
{
stub();
}
#define TIMESTAMP 0x2358
void anv_CmdWriteTimestamp(
VkCmdBuffer cmdBuffer,
VkTimestampType timestampType,
VkBuffer destBuffer,
VkDeviceSize destOffset)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
struct anv_bo *bo = buffer->bo;
switch (timestampType) {
case VK_TIMESTAMP_TYPE_TOP:
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
.RegisterAddress = TIMESTAMP,
.MemoryAddress = { bo, buffer->offset + destOffset });
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
.RegisterAddress = TIMESTAMP + 4,
.MemoryAddress = { bo, buffer->offset + destOffset + 4 });
break;
case VK_TIMESTAMP_TYPE_BOTTOM:
anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
.DestinationAddressType = DAT_PPGTT,
.PostSyncOperation = WriteTimestamp,
.Address = /* FIXME: This is only lower 32 bits */
{ bo, buffer->offset + destOffset });
break;
default:
break;
}
}
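/* MI_MATH ALU instruction encoding: each instruction dword packs an opcode
* in bits 31:20 and two operands in bits 19:10 and 9:0. CS_GPR(n) is the
* MMIO address of the command streamer's 64-bit general purpose register n,
* hence the stride of 8 bytes.
*/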
#define alu_opcode(v) __gen_field((v), 20, 31)
#define alu_operand1(v) __gen_field((v), 10, 19)
#define alu_operand2(v) __gen_field((v), 0, 9)
#define alu(opcode, operand1, operand2) \
alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
#define OPCODE_NOOP 0x000
#define OPCODE_LOAD 0x080
#define OPCODE_LOADINV 0x480
#define OPCODE_LOAD0 0x081
#define OPCODE_LOAD1 0x481
#define OPCODE_ADD 0x100
#define OPCODE_SUB 0x101
#define OPCODE_AND 0x102
#define OPCODE_OR 0x103
#define OPCODE_XOR 0x104
#define OPCODE_STORE 0x180
#define OPCODE_STOREINV 0x580
#define OPERAND_R0 0x00
#define OPERAND_R1 0x01
#define OPERAND_R2 0x02
#define OPERAND_R3 0x03
#define OPERAND_R4 0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF 0x32
#define OPERAND_CF 0x33
#define CS_GPR(n) (0x2600 + (n) * 8)
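/* Load a 64-bit value from memory into a CS GPR using two
* MI_LOAD_REGISTER_MEM commands, lower dword first.
*/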
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
struct anv_bo *bo, uint32_t offset)
{
anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
.RegisterAddress = reg,
.MemoryAddress = { bo, offset });
anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
.RegisterAddress = reg + 4,
.MemoryAddress = { bo, offset + 4 });
}
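/* Copy occlusion query results entirely on the GPU: the begin and end depth
* counts are loaded into GPR0 and GPR1, MI_MATH computes end - begin into
* GPR2, and MI_STORE_REGISTER_MEM writes the result (plus the high dword
* for 64-bit results) to the destination buffer.
*/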
void anv_CmdCopyQueryPoolResults(
VkCmdBuffer cmdBuffer,
VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount,
VkBuffer destBuffer,
VkDeviceSize destOffset,
VkDeviceSize destStride,
VkQueryResultFlags flags)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
uint32_t slot_offset, dst_offset;
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
/* Where is the availability info supposed to go? */
anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
return;
}
assert(pool->type == VK_QUERY_TYPE_OCCLUSION);
/* FIXME: If we're not waiting, should we just do this on the CPU? */
if (flags & VK_QUERY_RESULT_WAIT_BIT)
anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
.CommandStreamerStallEnable = true);
dst_offset = buffer->offset + destOffset;
for (uint32_t i = 0; i < queryCount; i++) {
slot_offset = (startQuery + i) * sizeof(struct anv_query_pool_slot);
emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0), &pool->bo, slot_offset);
emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(1), &pool->bo, slot_offset + 8);
/* FIXME: We need to clamp the result for 32 bit. */
uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GEN8_MI_MATH);
dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
dw[3] = alu(OPCODE_SUB, 0, 0);
dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
.RegisterAddress = CS_GPR(2),
/* FIXME: This is only lower 32 bits */
.MemoryAddress = { buffer->bo, dst_offset });
if (flags & VK_QUERY_RESULT_64_BIT)
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
.RegisterAddress = CS_GPR(2) + 4,
/* FIXME: This is only lower 32 bits */
.MemoryAddress = { buffer->bo, dst_offset + 4 });
dst_offset += destStride;
}
}
void anv_CmdInitAtomicCounters(
VkCmdBuffer cmdBuffer,
VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
const uint32_t* pData)
{
stub();
}
void anv_CmdLoadAtomicCounters(
VkCmdBuffer cmdBuffer,
VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
VkBuffer srcBuffer,
VkDeviceSize srcOffset)
{
stub();
}
void anv_CmdSaveAtomicCounters(
VkCmdBuffer cmdBuffer,
VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
VkBuffer destBuffer,
VkDeviceSize destOffset)
{
stub();
}
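/* In addition to capturing the attachments, framebuffer creation bakes a
* dynamic viewport/scissor state covering the whole framebuffer
* (framebuffer->vp_state), presumably for use by the driver's internal
* operations such as render pass clears.
*/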
VkResult anv_CreateFramebuffer(
VkDevice _device,
const VkFramebufferCreateInfo* pCreateInfo,
VkFramebuffer* pFramebuffer)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_framebuffer *framebuffer;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (framebuffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
framebuffer->color_attachments[i] =
(struct anv_surface_view *) pCreateInfo->pColorAttachments[i].view;
}
if (pCreateInfo->pDepthStencilAttachment) {
framebuffer->depth_stencil =
(struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
}
framebuffer->sample_count = pCreateInfo->sampleCount;
framebuffer->width = pCreateInfo->width;
framebuffer->height = pCreateInfo->height;
framebuffer->layers = pCreateInfo->layers;
vkCreateDynamicViewportState((VkDevice) device,
&(VkDynamicVpStateCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
.viewportAndScissorCount = 1,
.pViewports = (VkViewport[]) {
{
.originX = 0,
.originY = 0,
.width = pCreateInfo->width,
.height = pCreateInfo->height,
.minDepth = 0,
.maxDepth = 1
},
},
.pScissors = (VkRect[]) {
{ { 0, 0 },
{ pCreateInfo->width, pCreateInfo->height } },
}
},
&framebuffer->vp_state);
*pFramebuffer = (VkFramebuffer) framebuffer;
return VK_SUCCESS;
}
VkResult anv_CreateRenderPass(
VkDevice _device,
const VkRenderPassCreateInfo* pCreateInfo,
VkRenderPass* pRenderPass)
{
struct anv_device *device = (struct anv_device *) _device;
struct anv_render_pass *pass;
size_t size;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
size = sizeof(*pass) +
pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
pass = anv_device_alloc(device, size, 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (pass == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
pass->render_area = pCreateInfo->renderArea;
pass->num_layers = pCreateInfo->layers;
pass->num_clear_layers = 0;
for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
pass->num_clear_layers++;
}
*pRenderPass = (VkRenderPass) pass;
return VK_SUCCESS;
}
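/* Write the framebuffer's color attachment surface states (and their
* relocations) into the first entries of the fragment stage's binding
* table, i.e. the MAX_RTS slots that the descriptor set code leaves free,
* and mark descriptor state dirty so the table gets re-uploaded.
*/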
void
anv_cmd_buffer_fill_render_targets(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;
struct anv_bindings *bindings = cmd_buffer->bindings;
for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
struct anv_surface_view *view = framebuffer->color_attachments[i];
bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].surfaces[i] = view->surface_state.offset;
bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].bo = view->bo;
bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].relocs[i].offset = view->offset;
}
cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
}
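/* Beginning a render pass programs the drawing rectangle to the pass's
* render area, points the fragment binding table at the framebuffer's
* render targets, and hands the pass to anv_cmd_buffer_clear() to perform
* any load-op clears.
*/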
void anv_CmdBeginRenderPass(
VkCmdBuffer cmdBuffer,
const VkRenderPassBegin* pRenderPassBegin)
{
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
struct anv_render_pass *pass = (struct anv_render_pass *) pRenderPassBegin->renderPass;
struct anv_framebuffer *framebuffer =
(struct anv_framebuffer *) pRenderPassBegin->framebuffer;
cmd_buffer->framebuffer = framebuffer;
anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
.ClippedDrawingRectangleYMin = pass->render_area.offset.y,
.ClippedDrawingRectangleXMin = pass->render_area.offset.x,
.ClippedDrawingRectangleYMax =
pass->render_area.offset.y + pass->render_area.extent.height - 1,
.ClippedDrawingRectangleXMax =
pass->render_area.offset.x + pass->render_area.extent.width - 1,
.DrawingRectangleOriginY = 0,
.DrawingRectangleOriginX = 0);
anv_cmd_buffer_fill_render_targets(cmd_buffer);
anv_cmd_buffer_clear(cmd_buffer, pass);
}
void anv_CmdEndRenderPass(
VkCmdBuffer cmdBuffer,
VkRenderPass renderPass)
{
/* Emit a flushing pipe control at the end of a pass. This is kind of a
* hack but it ensures that render targets always actually get written.
* Eventually, we should do flushing based on image format transitions
* or something of that nature.
*/
struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *)cmdBuffer;
anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
.PostSyncOperation = NoWrite,
.RenderTargetCacheFlushEnable = true,
.InstructionCacheInvalidateEnable = true,
.DepthCacheFlushEnable = true,
.VFCacheInvalidationEnable = true,
.TextureCacheInvalidationEnable = true,
.CommandStreamerStallEnable = true);
stub();
}