blob: fcc27da1f83fae4d8a9123f6397172d48d39341b [file] [log] [blame]
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
Emil Velikov83548e12016-11-24 20:30:39 +000024#include <dlfcn.h>
Kristian Høgsberg769785c2015-05-08 22:32:37 -070025#include <assert.h>
26#include <stdbool.h>
27#include <string.h>
Jason Ekstrand920f34a2016-11-07 17:24:24 -080028#include <sys/mman.h>
Emil Velikov83548e12016-11-24 20:30:39 +000029#include <sys/stat.h>
Kristian Høgsberg769785c2015-05-08 22:32:37 -070030#include <unistd.h>
31#include <fcntl.h>
32
Chad Versace2c2233e2015-07-17 15:04:27 -070033#include "anv_private.h"
Jason Ekstrand6a7ca4e2015-08-14 17:25:04 -070034#include "util/strtod.h"
Jason Ekstrande45748b2016-01-20 11:16:44 -080035#include "util/debug.h"
Kristian Høgsberg769785c2015-05-08 22:32:37 -070036
Jason Ekstrandf6d95872016-02-18 10:19:02 -080037#include "genxml/gen7_pack.h"
Jason Ekstrandde54b4b2015-11-16 12:29:07 -080038
/* Global Vulkan entry-point dispatch table for this driver. */
struct anv_dispatch_table dtable;
40
/* brw compiler shader-debug callback: intentionally a no-op; debug output
 * from the backend compiler is discarded by this driver. */
static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }
44
45static void
46compiler_perf_log(void *data, const char *fmt, ...)
47{
48 va_list args;
49 va_start(args, fmt);
50
51 if (unlikely(INTEL_DEBUG & DEBUG_PERF))
52 vfprintf(stderr, fmt, args);
53
54 va_end(args);
55}
56
/* Resolve the shared object that contains the code at `ptr` (via dladdr)
 * and store that file's last-modification time in *timestamp.
 *
 * Returns false if the address cannot be resolved to an on-disk file or
 * the file cannot be stat()ed; *timestamp is untouched in that case.
 *
 * NOTE(review): st_mtim.tv_sec is a time_t but is truncated to 32 bits
 * here — acceptable for a cache-busting stamp, not a real timestamp.
 */
static bool
anv_get_function_timestamp(void *ptr, uint32_t* timestamp)
{
   Dl_info info;
   struct stat st;
   if (!dladdr(ptr, &info) || !info.dli_fname)
      return false;

   if (stat(info.dli_fname, &st))
      return false;

   *timestamp = st.st_mtim.tv_sec;
   return true;
}
71
72static bool
Emil Velikovde138e92016-11-24 20:30:38 +000073anv_device_get_cache_uuid(void *uuid)
74{
Emil Velikov83548e12016-11-24 20:30:39 +000075 uint32_t timestamp;
76
Emil Velikovde138e92016-11-24 20:30:38 +000077 memset(uuid, 0, VK_UUID_SIZE);
Kenneth Graunke15d3fc12016-11-28 13:37:44 -080078 if (!anv_get_function_timestamp(anv_device_get_cache_uuid, &timestamp))
79 return false;
Emil Velikov83548e12016-11-24 20:30:39 +000080
81 snprintf(uuid, VK_UUID_SIZE, "anv-%d", timestamp);
82 return true;
Emil Velikovde138e92016-11-24 20:30:38 +000083}
84
Kristian Høgsberg769785c2015-05-08 22:32:37 -070085static VkResult
Chad Versace4422bd42015-07-09 16:22:18 -070086anv_physical_device_init(struct anv_physical_device *device,
87 struct anv_instance *instance,
88 const char *path)
Kristian Høgsberg769785c2015-05-08 22:32:37 -070089{
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -070090 VkResult result;
Kristian Høgsberg Kristensen9564dd32015-07-21 13:09:25 -070091 int fd;
92
93 fd = open(path, O_RDWR | O_CLOEXEC);
94 if (fd < 0)
Jason Ekstrand34ff4fb2016-08-22 18:10:14 -070095 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
Kristian Høgsberg769785c2015-05-08 22:32:37 -070096
Jason Ekstrand39cd3782015-09-24 13:51:40 -070097 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
Kristian Høgsberg769785c2015-05-08 22:32:37 -070098 device->instance = instance;
Jason Ekstrande023c102016-05-24 11:02:18 -070099
100 assert(strlen(path) < ARRAY_SIZE(device->path));
101 strncpy(device->path, path, ARRAY_SIZE(device->path));
Chad Versacef9c948e2015-10-07 11:36:51 -0700102
Kristian Høgsberg Kristensenaac6f7c2015-08-14 09:39:01 -0700103 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700104 if (!device->chipset_id) {
Jason Ekstrand34ff4fb2016-08-22 18:10:14 -0700105 result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700106 goto fail;
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700107 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700108
Jason Ekstrand979d0ac2016-08-25 16:22:58 -0700109 device->name = gen_get_device_name(device->chipset_id);
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300110 if (!gen_get_device_info(device->chipset_id, &device->info)) {
Jason Ekstrand34ff4fb2016-08-22 18:10:14 -0700111 result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700112 goto fail;
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700113 }
Jason Ekstrand584f9d42015-11-02 12:14:37 -0800114
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300115 if (device->info.is_haswell) {
Jason Ekstrandf0390bc2015-11-17 07:07:02 -0800116 fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300117 } else if (device->info.gen == 7 && !device->info.is_baytrail) {
Jason Ekstrand862da6a2015-11-09 12:18:12 -0800118 fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300119 } else if (device->info.gen == 7 && device->info.is_baytrail) {
Kristian Høgsbergdac57752015-12-01 15:39:30 -0800120 fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300121 } else if (device->info.gen >= 8) {
Kristian Høgsberg Kristensen7c5e1fd2016-01-08 22:24:58 -0800122 /* Broadwell, Cherryview, Skylake, Broxton, Kabylake is as fully
123 * supported as anything */
Jason Ekstrand584f9d42015-11-02 12:14:37 -0800124 } else {
Jason Ekstrandfed35862015-12-02 16:14:58 -0800125 result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
Jason Ekstrand584f9d42015-11-02 12:14:37 -0800126 "Vulkan not yet supported on %s", device->name);
127 goto fail;
128 }
129
Jordan Justen1a3adae2016-03-28 14:45:24 -0700130 device->cmd_parser_version = -1;
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300131 if (device->info.gen == 7) {
Jordan Justen1a3adae2016-03-28 14:45:24 -0700132 device->cmd_parser_version =
133 anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
134 if (device->cmd_parser_version == -1) {
135 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
136 "failed to get command parser version");
137 goto fail;
138 }
139 }
140
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700141 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
Chad Versacef9c948e2015-10-07 11:36:51 -0700142 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
143 "failed to get aperture size: %m");
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700144 goto fail;
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700145 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700146
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700147 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
Chad Versacef9c948e2015-10-07 11:36:51 -0700148 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
149 "kernel missing gem wait");
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700150 goto fail;
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700151 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700152
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700153 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
Chad Versacef9c948e2015-10-07 11:36:51 -0700154 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
155 "kernel missing execbuf2");
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700156 goto fail;
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700157 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700158
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300159 if (!device->info.has_llc &&
Kristian Høgsberg Kristensen220ac932015-12-19 22:25:57 -0800160 anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
Kristian Høgsberg Kristensenbbb68752015-12-03 23:58:05 -0800161 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
162 "kernel missing wc mmap");
163 goto fail;
164 }
165
Emil Velikov83548e12016-11-24 20:30:39 +0000166 if (!anv_device_get_cache_uuid(device->uuid)) {
167 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
168 "cannot generate UUID");
169 goto fail;
170 }
Jason Ekstrand580b2e82016-01-05 13:53:05 -0800171 bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
172
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100173 /* GENs prior to 8 do not support EU/Subslice info */
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300174 if (device->info.gen >= 8) {
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100175 device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
176 device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
177
178 /* Without this information, we cannot get the right Braswell
179 * brandstrings, and we have to use conservative numbers for GPGPU on
180 * many platforms, but otherwise, things will just work.
181 */
182 if (device->subslice_total < 1 || device->eu_total < 1) {
183 fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
184 " query GPU properties.\n");
185 }
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300186 } else if (device->info.gen == 7) {
187 device->subslice_total = 1 << (device->info.gt - 1);
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100188 }
189
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300190 if (device->info.is_cherryview &&
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100191 device->subslice_total > 0 && device->eu_total > 0) {
192 /* Logical CS threads = EUs per subslice * 7 threads per EU */
Lionel Landwerlin6b217282016-09-23 01:04:25 +0300193 uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100194
195 /* Fuse configurations may give more threads than expected, never less. */
Lionel Landwerlin6b217282016-09-23 01:04:25 +0300196 if (max_cs_threads > device->info.max_cs_threads)
197 device->info.max_cs_threads = max_cs_threads;
Lionel Landwerlin09394ee2016-09-07 17:19:35 +0100198 }
199
Jason Ekstranda71e6142015-10-19 22:06:59 -0700200 brw_process_intel_debug_variable();
201
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300202 device->compiler = brw_compiler_create(NULL, &device->info);
Jason Ekstrand6fb44692015-10-19 20:21:45 -0700203 if (device->compiler == NULL) {
204 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
205 goto fail;
206 }
Jason Ekstranda71e6142015-10-19 22:06:59 -0700207 device->compiler->shader_debug_log = compiler_debug_log;
208 device->compiler->shader_perf_log = compiler_perf_log;
Jason Ekstrand6fb44692015-10-19 20:21:45 -0700209
Emil Velikovace54032016-05-28 20:03:34 +0100210 result = anv_init_wsi(device);
Emil Velikova1cf4942016-11-24 20:30:43 +0000211 if (result != VK_SUCCESS) {
212 ralloc_free(device->compiler);
213 goto fail;
214 }
Jason Ekstrandeb6baa32016-05-15 22:21:24 -0700215
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300216 isl_device_init(&device->isl_dev, &device->info, swizzled);
Chad Versaceaf392912015-11-13 10:12:51 -0800217
Emil Velikov3af81712016-11-24 20:30:42 +0000218 close(fd);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700219 return VK_SUCCESS;
Chad Versace477383e2015-11-13 10:12:18 -0800220
Chad Versace8cda3e92015-07-09 16:31:39 -0700221fail:
Kristian Høgsberg Kristensen9564dd32015-07-21 13:09:25 -0700222 close(fd);
Kristian Høgsberg Kristensenc4b30e72015-08-26 04:03:38 -0700223 return result;
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700224}
225
/* Tear down what anv_physical_device_init() created: the WSI state and the
 * brw compiler context.  (The DRM fd was already closed by init.) */
static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   ralloc_free(device->compiler);
}
232
/* Instance-level extensions this driver advertises.  VK_KHR_surface is
 * always present; the per-platform surface extensions are compiled in
 * based on the VK_USE_PLATFORM_* build flags. */
static const VkExtensionProperties global_extensions[] = {
   {
      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
      .specVersion = 25,
   },
#ifdef VK_USE_PLATFORM_XCB_KHR
   {
      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
      .specVersion = 6,
   },
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
   {
      .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
      .specVersion = 6,
   },
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   {
      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
      .specVersion = 5,
   },
#endif
};
257
/* Device-level extensions this driver advertises. */
static const VkExtensionProperties device_extensions[] = {
   {
      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 68,
   },
};
264
/* Fallback VkAllocationCallbacks used when the application passes a NULL
 * allocator: thin wrappers around malloc/realloc/free.
 *
 * NOTE(review): the `align` argument is ignored — presumably malloc's
 * natural alignment is sufficient for every allocation the driver makes;
 * verify against the maximum alignment actually requested. */
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

/* The callback table handed to vk_alloc2() and stored on the instance
 * when the application supplies no allocator of its own. */
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
291
/* vkCreateInstance: validate the requested API version and extension list,
 * then allocate and initialize the anv_instance object.
 *
 * Returns VK_ERROR_INCOMPATIBLE_DRIVER for unsupported API versions,
 * VK_ERROR_EXTENSION_NOT_PRESENT for unknown extensions, and
 * VK_ERROR_OUT_OF_HOST_MEMORY on allocation failure. */
VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   /* apiVersion == 0 means "any version" per the Vulkan spec; treat it
    * as a request for 1.0.0. */
   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_MAKE_VERSION(1, 0, 0);
   }

   /* Accept any 1.0.x patch version, nothing else. */
   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
       client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
      return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                       "Client requested version %d.%d.%d",
                       VK_VERSION_MAJOR(client_version),
                       VK_VERSION_MINOR(client_version),
                       VK_VERSION_PATCH(client_version));
   }

   /* Every requested extension must appear in global_extensions. */
   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extensionName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   /* -1 marks "not yet enumerated"; see anv_EnumeratePhysicalDevices. */
   instance->physicalDeviceCount = -1;

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}
354
/* vkDestroyInstance: finish the (at most one) enumerated physical device,
 * then release instance-level resources and the instance itself. */
void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_free(&instance->alloc, instance);
}
373
/* vkEnumeratePhysicalDevices: lazily probe /dev/dri/renderD128..135 for a
 * supported GPU on first call (caching the result on the instance), then
 * report the count and/or handle per the Vulkan inout-count protocol. */
VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   /* physicalDeviceCount < 0 means we have not probed yet. */
   if (instance->physicalDeviceCount < 0) {
      char path[20];
      for (unsigned i = 0; i < 8; i++) {
         snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
         result = anv_physical_device_init(&instance->physicalDevice,
                                           instance, path);
         /* Keep scanning only past incompatible nodes; stop on success or
          * on any hard error. */
         if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }

      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
    *    an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else if (*pPhysicalDeviceCount < instance->physicalDeviceCount) {
      /* Caller's array is too small: per spec, report partial success. */
      return VK_INCOMPLETE;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
431
Jason Ekstrandf1a7c782015-11-30 12:21:19 -0800432void anv_GetPhysicalDeviceFeatures(
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700433 VkPhysicalDevice physicalDevice,
434 VkPhysicalDeviceFeatures* pFeatures)
435{
Kristian Høgsberg Kristensen4a2d17f2016-02-15 21:24:40 -0800436 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700437
438 *pFeatures = (VkPhysicalDeviceFeatures) {
Kristian Høgsberg Kristensendc5fdcd2016-02-01 11:54:40 -0800439 .robustBufferAccess = true,
Kristian Høgsberg Kristensen4a2d17f2016-02-15 21:24:40 -0800440 .fullDrawIndexUint32 = true,
Ilia Mirkina34f89c2016-11-27 14:41:42 -0500441 .imageCubeArray = true,
Jason Ekstrandf124f4a2016-07-14 18:01:29 -0700442 .independentBlend = true,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700443 .geometryShader = true,
444 .tessellationShader = false,
Anuj Phogatc4cd0e82016-08-08 16:10:00 -0700445 .sampleRateShading = true,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800446 .dualSrcBlend = true,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700447 .logicOp = true,
Jason Ekstrand802f0022016-01-14 06:58:11 -0800448 .multiDrawIndirect = false,
449 .drawIndirectFirstInstance = false,
Jason Ekstrandeb6764c2016-06-14 08:40:49 -0700450 .depthClamp = true,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700451 .depthBiasClamp = false,
452 .fillModeNonSolid = true,
453 .depthBounds = false,
454 .wideLines = true,
455 .largePoints = true,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800456 .alphaToOne = true,
457 .multiViewport = true,
Lionel Landwerlin014bd4a2016-10-07 13:53:04 +0100458 .samplerAnisotropy = true,
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300459 .textureCompressionETC2 = pdevice->info.gen >= 8 ||
460 pdevice->info.is_baytrail,
461 .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700462 .textureCompressionBC = true,
Kristian Høgsberg Kristensen4a2d17f2016-02-15 21:24:40 -0800463 .occlusionQueryPrecise = true,
Kristian Høgsberg Kristensen9d8bae62016-02-29 10:55:39 -0800464 .pipelineStatisticsQuery = false,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800465 .fragmentStoresAndAtomics = true,
466 .shaderTessellationAndGeometryPointSize = true,
Jason Ekstrand51865452016-05-12 10:56:58 -0700467 .shaderImageGatherExtended = false,
Ilia Mirkin76b97d52016-11-27 16:37:17 -0500468 .shaderStorageImageExtendedFormats = true,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700469 .shaderStorageImageMultisample = false,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700470 .shaderUniformBufferArrayDynamicIndexing = true,
Kristian Høgsberg Kristensen4a2d17f2016-02-15 21:24:40 -0800471 .shaderSampledImageArrayDynamicIndexing = true,
472 .shaderStorageBufferArrayDynamicIndexing = true,
473 .shaderStorageImageArrayDynamicIndexing = true,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800474 .shaderStorageImageReadWithoutFormat = false,
475 .shaderStorageImageWriteWithoutFormat = true,
Kenneth Graunkea4d7a5b2016-10-03 20:44:38 -0700476 .shaderClipDistance = true,
477 .shaderCullDistance = true,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700478 .shaderFloat64 = false,
479 .shaderInt64 = false,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700480 .shaderInt16 = false,
Chad Versace545f5cc2015-10-07 10:05:02 -0700481 .alphaToOne = true,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800482 .variableMultisampleRate = false,
Jason Ekstrand802f0022016-01-14 06:58:11 -0800483 .inheritedQueries = false,
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700484 };
Jason Ekstrand5ec4ecc2016-04-15 14:53:16 -0700485
486 /* We can't do image stores in vec4 shaders */
487 pFeatures->vertexPipelineStoresAndAtomics =
Jason Ekstrand93db8282016-04-15 16:39:17 -0700488 pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
489 pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
Jason Ekstrandf6d51f32015-07-09 13:54:08 -0700490}
491
Jason Ekstrandf1a7c782015-11-30 12:21:19 -0800492void anv_GetPhysicalDeviceProperties(
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700493 VkPhysicalDevice physicalDevice,
Chad Versaced48e71c2015-10-07 10:36:46 -0700494 VkPhysicalDeviceProperties* pProperties)
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700495{
Chad Versaced48e71c2015-10-07 10:36:46 -0700496 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300497 const struct gen_device_info *devinfo = &pdevice->info;
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700498
Kristian Høgsberg Kristensendae800d2016-01-09 00:50:04 -0800499 const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
500
Nanley Cherya5748cb2016-07-06 11:13:48 -0700501 /* See assertions made when programming the buffer surface state. */
502 const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
503 (1ul << 30) : (1ul << 27);
504
Jason Ekstrandd6897452015-12-02 16:58:54 -0800505 VkSampleCountFlags sample_counts =
Chad Versace1c5d7b32016-01-20 16:04:28 -0800506 isl_device_get_sample_counts(&pdevice->isl_dev);
Jason Ekstrandd6897452015-12-02 16:58:54 -0800507
Chad Versaced48e71c2015-10-07 10:36:46 -0700508 VkPhysicalDeviceLimits limits = {
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700509 .maxImageDimension1D = (1 << 14),
510 .maxImageDimension2D = (1 << 14),
Nanley Chery181b1422016-03-05 15:17:00 -0800511 .maxImageDimension3D = (1 << 11),
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700512 .maxImageDimensionCube = (1 << 14),
Nanley Chery181b1422016-03-05 15:17:00 -0800513 .maxImageArrayLayers = (1 << 11),
Kenneth Graunke38a3a532016-01-26 23:09:45 -0800514 .maxTexelBufferElements = 128 * 1024 * 1024,
Nanley Cherya5748cb2016-07-06 11:13:48 -0700515 .maxUniformBufferRange = (1ul << 27),
516 .maxStorageBufferRange = max_raw_buffer_sz,
Jason Ekstrand5446bf32015-08-26 15:01:38 -0700517 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700518 .maxMemoryAllocationCount = UINT32_MAX,
Kristian Høgsberg Kristensen7b7a7c22016-01-20 14:36:52 -0800519 .maxSamplerAllocationCount = 64 * 1024,
Jason Ekstrande5db2092015-07-14 17:10:37 -0700520 .bufferImageGranularity = 64, /* A cache line */
Chad Versace033a37f2015-10-07 09:57:51 -0700521 .sparseAddressSpaceSize = 0,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700522 .maxBoundDescriptorSets = MAX_SETS,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700523 .maxPerStageDescriptorSamplers = 64,
524 .maxPerStageDescriptorUniformBuffers = 64,
525 .maxPerStageDescriptorStorageBuffers = 64,
526 .maxPerStageDescriptorSampledImages = 64,
527 .maxPerStageDescriptorStorageImages = 64,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800528 .maxPerStageDescriptorInputAttachments = 64,
529 .maxPerStageResources = 128,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700530 .maxDescriptorSetSamplers = 256,
531 .maxDescriptorSetUniformBuffers = 256,
Chad Versace033a37f2015-10-07 09:57:51 -0700532 .maxDescriptorSetUniformBuffersDynamic = 256,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700533 .maxDescriptorSetStorageBuffers = 256,
Chad Versace033a37f2015-10-07 09:57:51 -0700534 .maxDescriptorSetStorageBuffersDynamic = 256,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700535 .maxDescriptorSetSampledImages = 256,
536 .maxDescriptorSetStorageImages = 256,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800537 .maxDescriptorSetInputAttachments = 256,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700538 .maxVertexInputAttributes = 32,
Chad Versace033a37f2015-10-07 09:57:51 -0700539 .maxVertexInputBindings = 32,
Kenneth Graunke38a3a532016-01-26 23:09:45 -0800540 .maxVertexInputAttributeOffset = 2047,
541 .maxVertexInputBindingStride = 2048,
542 .maxVertexOutputComponents = 128,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800543 .maxTessellationGenerationLevel = 0,
544 .maxTessellationPatchSize = 0,
545 .maxTessellationControlPerVertexInputComponents = 0,
546 .maxTessellationControlPerVertexOutputComponents = 0,
547 .maxTessellationControlPerPatchOutputComponents = 0,
548 .maxTessellationControlTotalOutputComponents = 0,
549 .maxTessellationEvaluationInputComponents = 0,
550 .maxTessellationEvaluationOutputComponents = 0,
Kenneth Graunke38a3a532016-01-26 23:09:45 -0800551 .maxGeometryShaderInvocations = 32,
552 .maxGeometryInputComponents = 64,
553 .maxGeometryOutputComponents = 128,
554 .maxGeometryOutputVertices = 256,
555 .maxGeometryTotalOutputComponents = 1024,
556 .maxFragmentInputComponents = 128,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800557 .maxFragmentOutputAttachments = 8,
Dave Airlieeaf07682016-11-29 11:16:56 +1000558 .maxFragmentDualSrcAttachments = 1,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700559 .maxFragmentCombinedOutputResources = 8,
Kenneth Graunke38a3a532016-01-26 23:09:45 -0800560 .maxComputeSharedMemorySize = 32768,
561 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
Lionel Landwerlin6b217282016-09-23 01:04:25 +0300562 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700563 .maxComputeWorkGroupSize = {
Lionel Landwerlin6b217282016-09-23 01:04:25 +0300564 16 * devinfo->max_cs_threads,
565 16 * devinfo->max_cs_threads,
566 16 * devinfo->max_cs_threads,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700567 },
568 .subPixelPrecisionBits = 4 /* FIXME */,
569 .subTexelPrecisionBits = 4 /* FIXME */,
570 .mipmapPrecisionBits = 4 /* FIXME */,
571 .maxDrawIndexedIndexValue = UINT32_MAX,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800572 .maxDrawIndirectCount = UINT32_MAX,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700573 .maxSamplerLodBias = 16,
574 .maxSamplerAnisotropy = 16,
Jason Ekstranddaf68a92015-10-06 17:21:44 -0700575 .maxViewports = MAX_VIEWPORTS,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700576 .maxViewportDimensions = { (1 << 14), (1 << 14) },
Nanley Chery7ac08ad2016-05-17 15:28:01 -0700577 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700578 .viewportSubPixelBits = 13, /* We take a float? */
Jason Ekstrandf076d532016-01-01 09:26:06 -0800579 .minMemoryMapAlignment = 4096, /* A page */
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700580 .minTexelBufferOffsetAlignment = 1,
581 .minUniformBufferOffsetAlignment = 1,
582 .minStorageBufferOffsetAlignment = 1,
Kenneth Graunke38a3a532016-01-26 23:09:45 -0800583 .minTexelOffset = -8,
584 .maxTexelOffset = 7,
585 .minTexelGatherOffset = -8,
586 .maxTexelGatherOffset = 7,
Anuj Phogat0bf531a2016-07-28 17:37:20 -0700587 .minInterpolationOffset = -0.5,
588 .maxInterpolationOffset = 0.4375,
589 .subPixelInterpolationOffsetBits = 4,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700590 .maxFramebufferWidth = (1 << 14),
591 .maxFramebufferHeight = (1 << 14),
592 .maxFramebufferLayers = (1 << 10),
Jason Ekstrandd6897452015-12-02 16:58:54 -0800593 .framebufferColorSampleCounts = sample_counts,
594 .framebufferDepthSampleCounts = sample_counts,
595 .framebufferStencilSampleCounts = sample_counts,
596 .framebufferNoAttachmentsSampleCounts = sample_counts,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700597 .maxColorAttachments = MAX_RTS,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800598 .sampledImageColorSampleCounts = sample_counts,
599 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
600 .sampledImageDepthSampleCounts = sample_counts,
601 .sampledImageStencilSampleCounts = sample_counts,
602 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700603 .maxSampleMaskWords = 1,
Jason Ekstrand802f0022016-01-14 06:58:11 -0800604 .timestampComputeAndGraphics = false,
Philipp Zabel0408d502016-10-06 01:48:04 +0200605 .timestampPeriod = time_stamp_base,
Kenneth Graunkea4d7a5b2016-10-03 20:44:38 -0700606 .maxClipDistances = 8,
607 .maxCullDistances = 8,
608 .maxCombinedClipAndCullDistances = 8,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800609 .discreteQueuePriorities = 1,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700610 .pointSizeRange = { 0.125, 255.875 },
611 .lineWidthRange = { 0.0, 7.9921875 },
612 .pointSizeGranularity = (1.0 / 8.0),
613 .lineWidthGranularity = (1.0 / 128.0),
Jason Ekstrandd6897452015-12-02 16:58:54 -0800614 .strictLines = false, /* FINISHME */
Chad Versace8cc6f052016-01-26 10:56:06 -0800615 .standardSampleLocations = true,
Jason Ekstrandd6897452015-12-02 16:58:54 -0800616 .optimalBufferCopyOffsetAlignment = 128,
617 .optimalBufferCopyRowPitchAlignment = 128,
618 .nonCoherentAtomSize = 64,
Jason Ekstrand65e0b302015-07-09 15:38:30 -0700619 };
620
Jason Ekstrand977a4692015-07-09 15:53:03 -0700621 *pProperties = (VkPhysicalDeviceProperties) {
Jason Ekstrand20417b22016-03-22 16:21:21 -0700622 .apiVersion = VK_MAKE_VERSION(1, 0, 5),
Jason Ekstrand977a4692015-07-09 15:53:03 -0700623 .driverVersion = 1,
Jason Ekstrandaadb7dc2015-11-30 21:10:14 -0800624 .vendorID = 0x8086,
625 .deviceID = pdevice->chipset_id,
Jason Ekstrand977a4692015-07-09 15:53:03 -0700626 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
Chad Versaced48e71c2015-10-07 10:36:46 -0700627 .limits = limits,
628 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
Jason Ekstrand977a4692015-07-09 15:53:03 -0700629 };
630
631 strcpy(pProperties->deviceName, pdevice->name);
Emil Velikovde138e92016-11-24 20:30:38 +0000632 memcpy(pProperties->pipelineCacheUUID, pdevice->uuid, VK_UUID_SIZE);
Jason Ekstrand977a4692015-07-09 15:53:03 -0700633}
634
Jason Ekstrandf1a7c782015-11-30 12:21:19 -0800635void anv_GetPhysicalDeviceQueueFamilyProperties(
Jason Ekstrand1f907012015-07-09 16:11:24 -0700636 VkPhysicalDevice physicalDevice,
Jason Ekstranda6eba402015-10-05 21:17:12 -0700637 uint32_t* pCount,
638 VkQueueFamilyProperties* pQueueFamilyProperties)
Jason Ekstrand1f907012015-07-09 16:11:24 -0700639{
Jason Ekstranda6eba402015-10-05 21:17:12 -0700640 if (pQueueFamilyProperties == NULL) {
641 *pCount = 1;
Jason Ekstrandf1a7c782015-11-30 12:21:19 -0800642 return;
Jason Ekstranda6eba402015-10-05 21:17:12 -0700643 }
Jason Ekstrand1f907012015-07-09 16:11:24 -0700644
Jason Ekstranda6eba402015-10-05 21:17:12 -0700645 assert(*pCount >= 1);
Jason Ekstrand1f907012015-07-09 16:11:24 -0700646
Jason Ekstranda6eba402015-10-05 21:17:12 -0700647 *pQueueFamilyProperties = (VkQueueFamilyProperties) {
Jason Ekstrand1f907012015-07-09 16:11:24 -0700648 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
649 VK_QUEUE_COMPUTE_BIT |
Jason Ekstrand6a8a5422015-11-30 11:12:44 -0800650 VK_QUEUE_TRANSFER_BIT,
Jason Ekstrand1f907012015-07-09 16:11:24 -0700651 .queueCount = 1,
Kristian Høgsberg Kristensen925ad842016-01-09 00:51:14 -0800652 .timestampValidBits = 36, /* XXX: Real value here */
Jason Ekstrand74c4c4a2015-12-02 16:20:40 -0800653 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
Jason Ekstrand1f907012015-07-09 16:11:24 -0700654 };
Jason Ekstrand1f907012015-07-09 16:11:24 -0700655}
656
Jason Ekstrandf1a7c782015-11-30 12:21:19 -0800657void anv_GetPhysicalDeviceMemoryProperties(
Chad Versacedf2a0132015-07-09 19:49:19 -0700658 VkPhysicalDevice physicalDevice,
659 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
660{
661 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
Kristian Høgsberg Kristensen9564dd32015-07-21 13:09:25 -0700662 VkDeviceSize heap_size;
Chad Versacedf2a0132015-07-09 19:49:19 -0700663
664 /* Reserve some wiggle room for the driver by exposing only 75% of the
665 * aperture to the heap.
666 */
Kristian Høgsberg Kristensen9564dd32015-07-21 13:09:25 -0700667 heap_size = 3 * physical_device->aperture_size / 4;
Chad Versacedf2a0132015-07-09 19:49:19 -0700668
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300669 if (physical_device->info.has_llc) {
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -0800670 /* Big core GPUs share LLC with the CPU and thus one memory type can be
671 * both cached and coherent at the same time.
672 */
673 pMemoryProperties->memoryTypeCount = 1;
674 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
675 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
676 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
677 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
678 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
Jason Ekstrand3421ba12015-12-30 19:32:41 -0800679 .heapIndex = 0,
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -0800680 };
681 } else {
682 /* The spec requires that we expose a host-visible, coherent memory
683 * type, but Atom GPUs don't share LLC. Thus we offer two memory types
684 * to give the application a choice between cached, but not coherent and
685 * coherent but uncached (WC though).
686 */
687 pMemoryProperties->memoryTypeCount = 2;
688 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
689 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
690 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
691 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
Jason Ekstrand3421ba12015-12-30 19:32:41 -0800692 .heapIndex = 0,
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -0800693 };
694 pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
695 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
696 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
697 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
Jason Ekstrand3421ba12015-12-30 19:32:41 -0800698 .heapIndex = 0,
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -0800699 };
700 }
Chad Versacedf2a0132015-07-09 19:49:19 -0700701
702 pMemoryProperties->memoryHeapCount = 1;
703 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
704 .size = heap_size,
Jason Ekstrande6ab06a2015-12-02 10:39:15 -0800705 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
Chad Versacedf2a0132015-07-09 19:49:19 -0700706 };
Chad Versacedf2a0132015-07-09 19:49:19 -0700707}
708
/* Resolve an instance-level entrypoint by name.
 *
 * Passes NULL device info to the lookup — presumably this restricts
 * resolution to gen-independent entrypoints (the gen-specific dispatch in
 * anv_GetDeviceProcAddr needs a device's info); confirm against
 * anv_lookup_entrypoint.  Returns NULL-equivalent behavior per the lookup
 * helper when the name is unknown.
 */
PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(NULL, pName);
}
715
/* With version 1+ of the loader interface the ICD should expose
 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
 */
/* The forward declaration exists so the exported definition below has a
 * visible prototype (avoids -Wmissing-prototypes warnings).
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName);

/* Loader-facing alias: simply forwards to anv_GetInstanceProcAddr. */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_GetInstanceProcAddr(instance, pName);
}
731
/* Resolve a device-level entrypoint by name.
 *
 * Unlike anv_GetInstanceProcAddr, this passes the device's hardware info to
 * the lookup, so gen-specific entrypoint variants can be selected.
 */
PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice                                    _device,
    const char*                                 pName)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   return anv_lookup_entrypoint(&device->info, pName);
}
739
/* Initialize the device's (single) queue object. */
static void
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   /* The Vulkan loader expects its magic as the first field of every
    * dispatchable object.
    */
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;
}
747
/* Tear down a queue.  Currently a no-op; kept for symmetry with
 * anv_queue_init() and as a hook for future per-queue resources.
 */
static void
anv_queue_finish(struct anv_queue *queue)
{
}
752
Kristian Høgsberg77359202015-12-01 15:37:12 -0800753static struct anv_state
754anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
755{
756 struct anv_state state;
757
758 state = anv_state_pool_alloc(pool, size, align);
759 memcpy(state.map, p, size);
760
761 if (!pool->block_pool->device->info.has_llc)
762 anv_state_clflush(state);
763
764 return state;
765}
766
/* One border-color entry as written into the dynamic state pool: a
 * 4-channel color viewable as either floats or integers (same 16 bytes),
 * padded out to 64 bytes — presumably to match the hardware's expected
 * border-color state stride; confirm against the gen8 sampler docs.
 */
struct gen8_border_color {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   /* Pad out to 64 bytes */
   uint32_t _pad[12];
};
775
/* Upload the table of standard Vulkan border colors into the dynamic state
 * pool and remember its location in device->border_colors.  The table is
 * indexed directly by the VK_BORDER_COLOR_* enum values.
 */
static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const struct gen8_border_color border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] =  { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =       { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =       { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =    { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =         { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
   };

   /* 64-byte alignment matches the padded size of gen8_border_color. */
   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
                                                    sizeof(border_colors), 64,
                                                    border_colors);
}
792
/* Copy a small, self-contained batch into a pool BO, submit it to the
 * render ring with no relocations, and block until it completes.  The BO is
 * returned to the batch BO pool before returning (the `fail` label is also
 * the success path's cleanup).  Returns VK_SUCCESS or VK_ERROR_DEVICE_LOST.
 */
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;
   int64_t timeout;
   int ret;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   /* Non-LLC platforms: flush CPU caches so the GPU sees the batch bytes. */
   if (!device->info.has_llc)
      anv_clflush_range(bo.map, size);

   /* Single execbuffer object: the batch itself, with no relocations. */
   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   /* Wait (effectively forever) for the batch to retire. */
   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo.gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
      goto fail;
   }

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}
856
Kristian Høgsberg454345d2015-05-17 16:33:48 -0700857VkResult anv_CreateDevice(
Jason Ekstrandc95f9b62015-07-09 18:20:10 -0700858 VkPhysicalDevice physicalDevice,
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700859 const VkDeviceCreateInfo* pCreateInfo,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -0800860 const VkAllocationCallbacks* pAllocator,
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700861 VkDevice* pDevice)
862{
Jason Ekstrandc95f9b62015-07-09 18:20:10 -0700863 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800864 VkResult result;
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700865 struct anv_device *device;
866
867 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
868
Jason Ekstrandaab95172016-01-14 07:41:45 -0800869 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Jason Ekstrandb5f68892015-09-17 11:19:16 -0700870 bool found = false;
871 for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
872 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
Jason Ekstrandaadb7dc2015-11-30 21:10:14 -0800873 device_extensions[j].extensionName) == 0) {
Jason Ekstrandb5f68892015-09-17 11:19:16 -0700874 found = true;
875 break;
876 }
877 }
878 if (!found)
Chad Versacef9c948e2015-10-07 11:36:51 -0700879 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
Jason Ekstrandb5f68892015-09-17 11:19:16 -0700880 }
881
Dave Airlie1ae6ece2016-10-14 13:31:35 +1000882 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -0800883 sizeof(*device), 8,
Jason Ekstrand45d17fc2016-01-18 14:04:13 -0800884 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700885 if (!device)
886 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
887
Jason Ekstrand39cd3782015-09-24 13:51:40 -0700888 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
Jason Ekstrandc95f9b62015-07-09 18:20:10 -0700889 device->instance = physical_device->instance;
Kristian Høgsberg Kristensen39a120a2016-02-10 09:43:03 -0800890 device->chipset_id = physical_device->chipset_id;
Chad Versace8cda3e92015-07-09 16:31:39 -0700891
Jason Ekstrandfcfb4042015-12-02 03:28:27 -0800892 if (pAllocator)
893 device->alloc = *pAllocator;
894 else
895 device->alloc = physical_device->instance->alloc;
896
Chad Versace8cda3e92015-07-09 16:31:39 -0700897 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
Jason Ekstrandc95f9b62015-07-09 18:20:10 -0700898 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800899 if (device->fd == -1) {
900 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700901 goto fail_device;
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800902 }
Chad Versace477383e2015-11-13 10:12:18 -0800903
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700904 device->context_id = anv_gem_create_context(device);
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800905 if (device->context_id == -1) {
906 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700907 goto fail_fd;
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800908 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700909
Lionel Landwerlinbc245902016-09-22 14:58:11 +0300910 device->info = physical_device->info;
Jason Ekstrand9c84b6c2015-12-28 13:26:49 -0800911 device->isl_dev = physical_device->isl_dev;
912
Jason Ekstrand869e3932016-03-18 16:32:46 -0700913 /* On Broadwell and later, we can use batch chaining to more efficiently
914 * implement growing command buffers. Prior to Haswell, the kernel
915 * command parser gets in the way and we have to fall back to growing
916 * the batch.
917 */
918 device->can_chain_batches = device->info.gen >= 8;
919
Jason Ekstrandc29ffea2016-05-14 14:52:36 -0700920 device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
921 pCreateInfo->pEnabledFeatures->robustBufferAccess;
922
Jason Ekstranda788e7c2015-09-17 18:23:21 -0700923 pthread_mutex_init(&device->mutex, NULL);
924
Jason Ekstrand843775b2016-11-02 09:11:11 -0700925 pthread_condattr_t condattr;
926 pthread_condattr_init(&condattr);
927 pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC);
928 pthread_cond_init(&device->queue_submit, NULL);
929 pthread_condattr_destroy(&condattr);
930
Jason Ekstrandecfb0742016-03-18 13:06:08 -0700931 anv_bo_pool_init(&device->batch_bo_pool, device);
Jason Ekstrand5ef81f02015-05-25 15:46:48 -0700932
Jordan Justenc7f6e422016-01-08 12:15:29 -0800933 anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700934
Kristian Høgsberg0a775e12015-05-13 15:34:34 -0700935 anv_state_pool_init(&device->dynamic_state_pool,
936 &device->dynamic_state_block_pool);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700937
Jason Ekstrandaee970c2016-01-12 13:48:32 -0800938 anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
Jason Ekstrand68997182016-08-24 23:48:32 -0700939 anv_state_pool_init(&device->instruction_state_pool,
940 &device->instruction_block_pool);
Kristian Høgsberg Kristensen30521fb2016-01-05 12:00:54 -0800941
Jason Ekstrand0e944462015-09-22 16:36:00 -0700942 anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700943
944 anv_state_pool_init(&device->surface_state_pool,
945 &device->surface_state_block_pool);
946
Jason Ekstrand3a3d79b2015-11-10 16:42:34 -0800947 anv_bo_init_new(&device->workaround_bo, device, 1024);
948
Jason Ekstrandc2f2c8e2016-06-16 15:26:54 -0700949 anv_scratch_pool_init(device, &device->scratch_pool);
Kristian Høgsberg Kristensen9b9f9732015-06-19 15:41:30 -0700950
Jason Ekstrand66b00d52015-06-09 12:28:58 -0700951 anv_queue_init(device, &device->queue);
952
Kristian Høgsberg Kristensen6cdada02016-02-05 16:11:12 -0800953 switch (device->info.gen) {
954 case 7:
955 if (!device->info.is_haswell)
956 result = gen7_init_device_state(device);
957 else
958 result = gen75_init_device_state(device);
959 break;
960 case 8:
961 result = gen8_init_device_state(device);
962 break;
963 case 9:
964 result = gen9_init_device_state(device);
965 break;
Kristian Høgsberg Kristensen5d72d7b2016-02-14 13:20:06 -0800966 default:
967 /* Shouldn't get here as we don't create physical devices for any other
968 * gens. */
969 unreachable("unhandled gen");
Kristian Høgsberg Kristensen6cdada02016-02-05 16:11:12 -0800970 }
971 if (result != VK_SUCCESS)
972 goto fail_fd;
973
Jason Ekstrand8f780af2016-08-22 21:37:28 -0700974 anv_device_init_blorp(device);
975
Kristian Høgsberg Kristensendc56e4f2015-05-29 16:06:06 -0700976 anv_device_init_border_colors(device);
977
Jason Ekstrand098209e2015-07-09 18:41:27 -0700978 *pDevice = anv_device_to_handle(device);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700979
980 return VK_SUCCESS;
981
982 fail_fd:
983 close(device->fd);
984 fail_device:
Dave Airlie1ae6ece2016-10-14 13:31:35 +1000985 vk_free(&device->alloc, device);
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700986
Kristian Høgsberg Kristensen5526c172016-01-03 22:43:47 -0800987 return result;
Kristian Høgsberg769785c2015-05-08 22:32:37 -0700988}
989
/* Destroy a logical device, releasing resources in roughly the reverse
 * order of their creation in anv_CreateDevice.
 */
void anv_DestroyDevice(
    VkDevice                                    _device,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_device_finish_blorp(device);

   anv_queue_finish(&device->queue);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors.  The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_scratch_pool_finish(device, &device->scratch_pool);

   anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
   anv_gem_close(device, device->workaround_bo.gem_handle);

   /* Each state pool is finished before the block pool that backs it. */
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_state_pool_finish(&device->instruction_state_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);

   anv_bo_pool_finish(&device->batch_bo_pool);

   pthread_cond_destroy(&device->queue_submit);
   pthread_mutex_destroy(&device->mutex);

   anv_gem_destroy_context(device, device->context_id);

   close(device->fd);

   vk_free(&device->alloc, device);
}
1030
Jason Ekstrand8ba684c2015-10-06 09:25:03 -07001031VkResult anv_EnumerateInstanceExtensionProperties(
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001032 const char* pLayerName,
Jason Ekstrandfe644722015-11-30 16:28:36 -08001033 uint32_t* pPropertyCount,
Jason Ekstrand8e05bbe2015-07-08 10:38:07 -07001034 VkExtensionProperties* pProperties)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001035{
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001036 if (pProperties == NULL) {
Jason Ekstrandfe644722015-11-30 16:28:36 -08001037 *pPropertyCount = ARRAY_SIZE(global_extensions);
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001038 return VK_SUCCESS;
1039 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001040
Emil Velikov5cc07d82016-10-06 14:12:27 +01001041 *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(global_extensions));
1042 typed_memcpy(pProperties, global_extensions, *pPropertyCount);
Kristian Høgsberg783e6212015-05-17 19:22:52 -07001043
Emil Velikov5cc07d82016-10-06 14:12:27 +01001044 if (*pPropertyCount < ARRAY_SIZE(global_extensions))
1045 return VK_INCOMPLETE;
Jason Ekstrand8e05bbe2015-07-08 10:38:07 -07001046
1047 return VK_SUCCESS;
1048}
1049
Jason Ekstrand8ba684c2015-10-06 09:25:03 -07001050VkResult anv_EnumerateDeviceExtensionProperties(
Jason Ekstrand8e05bbe2015-07-08 10:38:07 -07001051 VkPhysicalDevice physicalDevice,
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001052 const char* pLayerName,
Jason Ekstrandfe644722015-11-30 16:28:36 -08001053 uint32_t* pPropertyCount,
Jason Ekstrand8e05bbe2015-07-08 10:38:07 -07001054 VkExtensionProperties* pProperties)
1055{
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001056 if (pProperties == NULL) {
Jason Ekstrandfe644722015-11-30 16:28:36 -08001057 *pPropertyCount = ARRAY_SIZE(device_extensions);
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001058 return VK_SUCCESS;
1059 }
1060
Emil Velikov5cc07d82016-10-06 14:12:27 +01001061 *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(device_extensions));
1062 typed_memcpy(pProperties, device_extensions, *pPropertyCount);
Jason Ekstrand9a7600c2015-09-01 16:44:42 -07001063
Emil Velikov5cc07d82016-10-06 14:12:27 +01001064 if (*pPropertyCount < ARRAY_SIZE(device_extensions))
1065 return VK_INCOMPLETE;
Jason Ekstrand9a7600c2015-09-01 16:44:42 -07001066
1067 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001068}
1069
Jason Ekstrand8ba684c2015-10-06 09:25:03 -07001070VkResult anv_EnumerateInstanceLayerProperties(
Jason Ekstrandfe644722015-11-30 16:28:36 -08001071 uint32_t* pPropertyCount,
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001072 VkLayerProperties* pProperties)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001073{
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001074 if (pProperties == NULL) {
Jason Ekstrandfe644722015-11-30 16:28:36 -08001075 *pPropertyCount = 0;
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001076 return VK_SUCCESS;
1077 }
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001078
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001079 /* None supported at this time */
Chad Versacef9c948e2015-10-07 11:36:51 -07001080 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001081}
1082
Jason Ekstrand8ba684c2015-10-06 09:25:03 -07001083VkResult anv_EnumerateDeviceLayerProperties(
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001084 VkPhysicalDevice physicalDevice,
Jason Ekstrandfe644722015-11-30 16:28:36 -08001085 uint32_t* pPropertyCount,
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001086 VkLayerProperties* pProperties)
1087{
1088 if (pProperties == NULL) {
Jason Ekstrandfe644722015-11-30 16:28:36 -08001089 *pPropertyCount = 0;
Jason Ekstrand02db21a2015-07-14 16:11:21 -07001090 return VK_SUCCESS;
1091 }
1092
1093 /* None supported at this time */
Chad Versacef9c948e2015-10-07 11:36:51 -07001094 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001095}
1096
/* Return a handle to the device's single queue.  Only queueIndex 0 is
 * valid since the one queue family exposes queueCount == 1.
 */
void anv_GetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);
}
1109
Jason Ekstrand07798c92016-10-31 20:36:26 -07001110VkResult
1111anv_device_execbuf(struct anv_device *device,
1112 struct drm_i915_gem_execbuffer2 *execbuf,
1113 struct anv_bo **execbuf_bos)
1114{
1115 int ret = anv_gem_execbuffer(device, execbuf);
1116 if (ret != 0) {
1117 /* We don't know the real error. */
1118 return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
1119 }
1120
Jason Ekstrand18266242016-11-09 18:45:21 -08001121 struct drm_i915_gem_exec_object2 *objects =
1122 (void *)(uintptr_t)execbuf->buffers_ptr;
Jason Ekstrand07798c92016-10-31 20:36:26 -07001123 for (uint32_t k = 0; k < execbuf->buffer_count; k++)
1124 execbuf_bos[k]->offset = objects[k].offset;
1125
1126 return VK_SUCCESS;
1127}
1128
/* Submit batches of primary command buffers to the queue, optionally
 * followed by a fence batch.  All submission happens under the device
 * mutex (rationale below).  On the first failing execbuf, remaining
 * submissions are abandoned and the error is returned.
 */
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   VkResult result = VK_SUCCESS;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset but we re-use the same anv_bo struct when
    *    we do so. If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe. While the client is supposed to syncronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app. It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace. Due to the fact that the surface state buffer is shared
    *    between batches, we can't afford to have that happen from multiple
    *    threads at the same time. Even though the user is supposed to
    *    ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock such as block
    * pool resize only rarely happen, this will almost never be contended so
    * taking a lock isn't really an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   /* Submit every command buffer of every VkSubmitInfo, in order. */
   for (uint32_t i = 0; i < submitCount; i++) {
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

         result = anv_cmd_buffer_execbuf(device, cmd_buffer);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   if (fence) {
      /* The fence is its own tiny batch; submitting it after the command
       * buffers means it retires only once they have.
       */
      struct anv_bo *fence_bo = &fence->bo;
      result = anv_device_execbuf(device, &fence->execbuf, &fence_bo);
      if (result != VK_SUCCESS)
         goto out;

      /* Update the fence and wake up any waiters */
      assert(fence->state == ANV_FENCE_STATE_RESET);
      fence->state = ANV_FENCE_STATE_SUBMITTED;
      pthread_cond_broadcast(&device->queue_submit);
   }

out:
   pthread_mutex_unlock(&device->mutex);

   return result;
}
1196
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001197VkResult anv_QueueWaitIdle(
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001198 VkQueue _queue)
1199{
Jason Ekstrandc95f9b62015-07-09 18:20:10 -07001200 ANV_FROM_HANDLE(anv_queue, queue, _queue);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001201
Jason Ekstrand4c9dec82016-10-07 15:41:17 -07001202 return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001203}
1204
/* vkDeviceWaitIdle: block until the GPU has finished all submitted work.
 *
 * Builds a tiny batch on the stack containing only MI_BATCH_BUFFER_END
 * (followed by an MI_NOOP for padding) and submits it synchronously;
 * anv_device_submit_simple_batch waits for the batch to retire, which can
 * only happen once everything ahead of it in the ring has completed.
 */
VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_batch batch;

   /* 8 dwords is plenty for MI_BATCH_BUFFER_END + MI_NOOP. */
   uint32_t cmds[8];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);

   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   return anv_device_submit_simple_batch(device, &batch);
}
1220
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001221VkResult
1222anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
1223{
Jason Ekstrand6283b6d2016-11-01 13:09:36 -07001224 uint32_t gem_handle = anv_gem_create(device, size);
1225 if (!gem_handle)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001226 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1227
Jason Ekstrand6283b6d2016-11-01 13:09:36 -07001228 anv_bo_init(bo, gem_handle, size);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001229
1230 return VK_SUCCESS;
1231}
1232
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001233VkResult anv_AllocateMemory(
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001234 VkDevice _device,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001235 const VkMemoryAllocateInfo* pAllocateInfo,
1236 const VkAllocationCallbacks* pAllocator,
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001237 VkDeviceMemory* pMem)
1238{
Jason Ekstrandc95f9b62015-07-09 18:20:10 -07001239 ANV_FROM_HANDLE(anv_device, device, _device);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001240 struct anv_device_memory *mem;
1241 VkResult result;
1242
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001243 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001244
Jason Ekstrandb1325402015-12-17 11:00:38 -08001245 if (pAllocateInfo->allocationSize == 0) {
1246 /* Apparently, this is allowed */
1247 *pMem = VK_NULL_HANDLE;
1248 return VK_SUCCESS;
1249 }
1250
Chad Versacef9c948e2015-10-07 11:36:51 -07001251 /* We support exactly one memory heap. */
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -08001252 assert(pAllocateInfo->memoryTypeIndex == 0 ||
1253 (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
Chad Versacef43a3042015-07-09 19:59:44 -07001254
1255 /* FINISHME: Fail if allocation request exceeds heap size. */
1256
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001257 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001258 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001259 if (mem == NULL)
1260 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1261
Jason Ekstrand6b0b5722016-01-02 07:52:22 -08001262 /* The kernel is going to give us whole pages anyway */
1263 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
1264
1265 result = anv_bo_init_new(&mem->bo, device, alloc_size);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001266 if (result != VK_SUCCESS)
1267 goto fail;
1268
Kristian Høgsberg Kristensenc3c61d22015-12-03 23:09:09 -08001269 mem->type_index = pAllocateInfo->memoryTypeIndex;
1270
Jason Ekstrandb1217ea2016-11-07 17:25:07 -08001271 mem->map = NULL;
1272 mem->map_size = 0;
1273
Jason Ekstrand098209e2015-07-09 18:41:27 -07001274 *pMem = anv_device_memory_to_handle(mem);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001275
Jason Ekstrandc95f9b62015-07-09 18:20:10 -07001276 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001277
1278 fail:
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001279 vk_free2(&device->alloc, pAllocator, mem);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001280
1281 return result;
1282}
1283
Jason Ekstrand05a26a62015-10-05 20:50:51 -07001284void anv_FreeMemory(
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001285 VkDevice _device,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001286 VkDeviceMemory _mem,
1287 const VkAllocationCallbacks* pAllocator)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001288{
Jason Ekstrandc95f9b62015-07-09 18:20:10 -07001289 ANV_FROM_HANDLE(anv_device, device, _device);
1290 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001291
Jason Ekstrandb1325402015-12-17 11:00:38 -08001292 if (mem == NULL)
1293 return;
1294
Jason Ekstrandb1217ea2016-11-07 17:25:07 -08001295 if (mem->map)
1296 anv_UnmapMemory(_device, _mem);
1297
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001298 if (mem->bo.map)
1299 anv_gem_munmap(mem->bo.map, mem->bo.size);
1300
1301 if (mem->bo.gem_handle != 0)
1302 anv_gem_close(device, mem->bo.gem_handle);
1303
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001304 vk_free2(&device->alloc, pAllocator, mem);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001305}
1306
/* vkMapMemory: expose a CPU pointer to a sub-range of a VkDeviceMemory.
 *
 * GEM mmap requires page-aligned offsets, so the actual mapping is rounded
 * out to whole pages and the returned pointer is adjusted back to the
 * caller's requested offset.  The page-aligned mapping (base and length)
 * is recorded in mem->map / mem->map_size for vkUnmapMemory.
 */
VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   /* The zero-size allocation path hands out VK_NULL_HANDLE; map it to a
    * NULL pointer rather than failing.
    */
   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (size == VK_WHOLE_SIZE)
      size = mem->bo.size - offset;

   /* From the Vulkan spec version 1.0.32 docs for MapMemory:
    *
    *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
    *    assert(size != 0);
    *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
    *    equal to the size of the memory minus offset
    */
   assert(size > 0);
   assert(offset + size <= mem->bo.size);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   /* On non-LLC parts, memory type 0 is the write-combined (uncached from
    * the CPU's point of view) type, so ask the kernel for a WC mapping.
    */
   uint32_t gem_flags = 0;
   if (!device->info.has_llc && mem->type_index == 0)
      gem_flags |= I915_MMAP_WC;

   /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
   uint64_t map_offset = offset & ~4095ull;
   assert(offset >= map_offset);
   uint64_t map_size = (offset + size) - map_offset;

   /* Let's map whole pages */
   map_size = align_u64(map_size, 4096);

   void *map = anv_gem_mmap(device, mem->bo.gem_handle,
                            map_offset, map_size, gem_flags);
   if (map == MAP_FAILED)
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);

   /* NOTE(review): mem->map points at the page-aligned base (map_offset),
    * not at byte 0 of the memory object — code that adds range offsets to
    * mem->map (e.g. clflush_mapped_ranges) implicitly assumes the mapping
    * started at offset 0; confirm that assumption holds for its callers.
    */
   mem->map = map;
   mem->map_size = map_size;

   /* Hand back a pointer to the exact offset the caller asked for. */
   *ppData = mem->map + (offset - map_offset);

   return VK_SUCCESS;
}
1366
/* vkUnmapMemory: tear down the CPU mapping created by vkMapMemory.
 *
 * Unmaps the page-aligned region recorded in mem->map / mem->map_size and
 * clears both fields so anv_FreeMemory won't try to unmap again.
 */
void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   /* VK_NULL_HANDLE memory (zero-size allocation) was never mapped. */
   if (mem == NULL)
      return;

   anv_gem_munmap(mem->map, mem->map_size);

   mem->map = NULL;
   mem->map_size = 0;
}
1381
/* Flush the CPU cache lines covering each mapped memory range so the GPU
 * sees the data on non-LLC platforms.  The start pointer is rounded down
 * to a cacheline boundary; ranges extending past the mapping are clamped
 * to map_size.
 *
 * NOTE(review): ranges[i].offset is an offset into the VkDeviceMemory
 * object, but it is added to mem->map, which anv_MapMemory sets to the
 * page-aligned *mapping* base — the two only coincide when the memory was
 * mapped starting at offset 0.  Confirm callers guarantee that, or the
 * flushed addresses are shifted.
 */
static void
clflush_mapped_ranges(struct anv_device *device,
                      uint32_t count,
                      const VkMappedMemoryRange *ranges)
{
   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
      /* Round down to the containing cacheline. */
      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
      void *end;

      /* Clamp the end of the flush to the end of the mapping. */
      if (ranges[i].offset + ranges[i].size > mem->map_size)
         end = mem->map + mem->map_size;
      else
         end = mem->map + ranges[i].offset + ranges[i].size;

      while (p < end) {
         __builtin_ia32_clflush(p);
         p += CACHELINE_SIZE;
      }
   }
}
1403
Jason Ekstrandd9c2cae2015-07-07 17:22:29 -07001404VkResult anv_FlushMappedMemoryRanges(
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001405 VkDevice _device,
Jason Ekstrand6a6da542015-11-30 21:18:12 -08001406 uint32_t memoryRangeCount,
1407 const VkMappedMemoryRange* pMemoryRanges)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001408{
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001409 ANV_FROM_HANDLE(anv_device, device, _device);
1410
1411 if (device->info.has_llc)
1412 return VK_SUCCESS;
1413
1414 /* Make sure the writes we're flushing have landed. */
Kristian Høgsberg Kristensen0c4ef362016-01-29 12:10:12 -08001415 __builtin_ia32_mfence();
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001416
1417 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001418
1419 return VK_SUCCESS;
1420}
1421
Jason Ekstrandd9c2cae2015-07-07 17:22:29 -07001422VkResult anv_InvalidateMappedMemoryRanges(
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001423 VkDevice _device,
Jason Ekstrand6a6da542015-11-30 21:18:12 -08001424 uint32_t memoryRangeCount,
1425 const VkMappedMemoryRange* pMemoryRanges)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001426{
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001427 ANV_FROM_HANDLE(anv_device, device, _device);
1428
1429 if (device->info.has_llc)
1430 return VK_SUCCESS;
1431
1432 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
1433
1434 /* Make sure no reads get moved up above the invalidate. */
Kristian Høgsberg Kristensen0c4ef362016-01-29 12:10:12 -08001435 __builtin_ia32_mfence();
Kristian Høgsberge0b5f032015-12-01 15:25:07 -08001436
1437 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001438}
1439
Jason Ekstrandf1a7c782015-11-30 12:21:19 -08001440void anv_GetBufferMemoryRequirements(
Jason Ekstrandef8980e2015-07-07 18:16:42 -07001441 VkDevice device,
Jason Ekstrand55723e92015-07-14 14:59:39 -07001442 VkBuffer _buffer,
Jason Ekstrandef8980e2015-07-07 18:16:42 -07001443 VkMemoryRequirements* pMemoryRequirements)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001444{
Jason Ekstrand55723e92015-07-14 14:59:39 -07001445 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001446
Chad Versacef43a3042015-07-09 19:59:44 -07001447 /* The Vulkan spec (git aaed022) says:
1448 *
1449 * memoryTypeBits is a bitfield and contains one bit set for every
1450 * supported memory type for the resource. The bit `1<<i` is set if and
1451 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1452 * structure for the physical device is supported.
1453 *
1454 * We support exactly one memory type.
1455 */
1456 pMemoryRequirements->memoryTypeBits = 1;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001457
Jason Ekstrand55723e92015-07-14 14:59:39 -07001458 pMemoryRequirements->size = buffer->size;
1459 pMemoryRequirements->alignment = 16;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001460}
1461
Jason Ekstrandf1a7c782015-11-30 12:21:19 -08001462void anv_GetImageMemoryRequirements(
Jason Ekstrandbb6567f2015-07-08 09:04:16 -07001463 VkDevice device,
Jason Ekstrand55723e92015-07-14 14:59:39 -07001464 VkImage _image,
1465 VkMemoryRequirements* pMemoryRequirements)
1466{
1467 ANV_FROM_HANDLE(anv_image, image, _image);
1468
1469 /* The Vulkan spec (git aaed022) says:
1470 *
1471 * memoryTypeBits is a bitfield and contains one bit set for every
1472 * supported memory type for the resource. The bit `1<<i` is set if and
1473 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1474 * structure for the physical device is supported.
1475 *
1476 * We support exactly one memory type.
1477 */
1478 pMemoryRequirements->memoryTypeBits = 1;
1479
1480 pMemoryRequirements->size = image->size;
1481 pMemoryRequirements->alignment = image->alignment;
Jason Ekstrand55723e92015-07-14 14:59:39 -07001482}
1483
Jason Ekstrandf1a7c782015-11-30 12:21:19 -08001484void anv_GetImageSparseMemoryRequirements(
Jason Ekstrandc7fcfeb2015-07-14 17:06:11 -07001485 VkDevice device,
1486 VkImage image,
Jason Ekstrand5a024412015-12-02 03:34:43 -08001487 uint32_t* pSparseMemoryRequirementCount,
Jason Ekstrandc7fcfeb2015-07-14 17:06:11 -07001488 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1489{
Jason Ekstrandf1a7c782015-11-30 12:21:19 -08001490 stub();
Jason Ekstrandc7fcfeb2015-07-14 17:06:11 -07001491}
1492
/* vkGetDeviceMemoryCommitment: report how many bytes of a lazily
 * allocated memory object are currently committed.
 *
 * This driver never allocates lazily, so the committed size is reported
 * as zero unconditionally.
 */
void anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}
1500
Jason Ekstrand55723e92015-07-14 14:59:39 -07001501VkResult anv_BindBufferMemory(
1502 VkDevice device,
1503 VkBuffer _buffer,
Jason Ekstrand6a6da542015-11-30 21:18:12 -08001504 VkDeviceMemory _memory,
1505 VkDeviceSize memoryOffset)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001506{
Jason Ekstrand6a6da542015-11-30 21:18:12 -08001507 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
Jason Ekstrand55723e92015-07-14 14:59:39 -07001508 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001509
Jason Ekstrandb1325402015-12-17 11:00:38 -08001510 if (mem) {
1511 buffer->bo = &mem->bo;
1512 buffer->offset = memoryOffset;
1513 } else {
1514 buffer->bo = NULL;
1515 buffer->offset = 0;
1516 }
Jason Ekstrand55723e92015-07-14 14:59:39 -07001517
1518 return VK_SUCCESS;
1519}
1520
/* vkQueueBindSparse: bind sparse resource memory.
 *
 * Sparse resources are not implemented; report the call as unsupported.
 * (stub_return logs the unimplemented entrypoint and returns its
 * argument.)
 */
VkResult anv_QueueBindSparse(
    VkQueue                                     queue,
    uint32_t                                    bindInfoCount,
    const VkBindSparseInfo*                     pBindInfo,
    VkFence                                     fence)
{
   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
}
1529
/* vkCreateFence: build a fence as a tiny self-contained GPU batch.
 *
 * The fence's CPU struct, its batch (MI_BATCH_BUFFER_END + MI_NOOP), and
 * the pre-baked execbuf that submits it all live inside a single 4 KiB BO
 * from the batch BO pool.  Signaling works by submitting that batch after
 * the real work (see anv_QueueSubmit) and gem-waiting on the BO.
 */
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_bo fence_bo;
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Fences are small.  Just store the CPU data structure in the BO. */
   fence = fence_bo.map;
   fence->bo = fence_bo;

   /* Place the batch after the CPU data but on its own cache line. */
   const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
   batch.next = batch.start = fence->bo.map + batch_offset;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   if (!device->info.has_llc) {
      /* Non-LLC: the GPU won't snoop the CPU cache, so flush the (single)
       * cache line holding the batch before the GPU can execute it.
       */
      assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
      assert(batch.next - batch.start <= CACHELINE_SIZE);
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(batch.start);
   }

   /* Pre-bake the execbuf so vkQueueSubmit only has to fire the ioctl. */
   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
   fence->execbuf.batch_len = batch.next - batch.start;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   /* Honor VK_FENCE_CREATE_SIGNALED_BIT: start signaled or unsubmitted. */
   if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
      fence->state = ANV_FENCE_STATE_SIGNALED;
   } else {
      fence->state = ANV_FENCE_STATE_RESET;
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}
1599
/* vkDestroyFence: return the fence's BO to the batch BO pool.
 *
 * The whole fence (CPU struct, batch, execbuf) lives inside that one BO
 * (see anv_CreateFence), so there is nothing else to free.
 */
void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   /* Destroying VK_NULL_HANDLE is a no-op, per the spec. */
   if (!fence)
      return;

   /* Invariant from anv_CreateFence: the struct sits at the start of the
    * BO's mapping.
    */
   assert(fence->bo.map == fence);
   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}
1614
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001615VkResult anv_ResetFences(
Kristian Høgsberg6afb2642015-05-18 08:49:15 -07001616 VkDevice _device,
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001617 uint32_t fenceCount,
Jason Ekstrandd5349b12015-07-07 17:18:00 -07001618 const VkFence* pFences)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001619{
Chad Versace169251b2015-07-17 13:59:48 -07001620 for (uint32_t i = 0; i < fenceCount; i++) {
1621 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
Jason Ekstrand843775b2016-11-02 09:11:11 -07001622 fence->state = ANV_FENCE_STATE_RESET;
Chad Versace169251b2015-07-17 13:59:48 -07001623 }
Kristian Høgsberg6afb2642015-05-18 08:49:15 -07001624
1625 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001626}
1627
/* vkGetFenceStatus: poll a fence without blocking.
 *
 * Maps the driver's three-state fence machine onto the Vulkan result:
 * RESET (never submitted) and still-busy SUBMITTED -> VK_NOT_READY,
 * SIGNALED or just-completed SUBMITTED -> VK_SUCCESS.
 */
VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   /* Zero timeout makes anv_gem_wait a non-blocking poll. */
   int64_t t = 0;
   int ret;

   switch (fence->state) {
   case ANV_FENCE_STATE_RESET:
      /* If it hasn't even been sent off to the GPU yet, it's not ready */
      return VK_NOT_READY;

   case ANV_FENCE_STATE_SIGNALED:
      /* It's been signaled, return success */
      return VK_SUCCESS;

   case ANV_FENCE_STATE_SUBMITTED:
      /* It's been submitted to the GPU but we don't know if it's done yet. */
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == 0) {
         /* Cache the completion so later polls return immediately. */
         fence->state = ANV_FENCE_STATE_SIGNALED;
         return VK_SUCCESS;
      } else {
         return VK_NOT_READY;
      }
   default:
      unreachable("Invalid fence status");
   }
}
1659
Jason Ekstrand843775b2016-11-02 09:11:11 -07001660#define NSEC_PER_SEC 1000000000
1661#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
1662
/* vkWaitForFences: block until any/all of the given fences signal, or the
 * timeout (nanoseconds) expires.
 *
 * Submitted fences are waited on via the GEM wait ioctl.  Fences that are
 * still in the RESET state (created but never submitted) cannot be waited
 * on by the kernel, so when only those remain we fall back to sleeping on
 * device->queue_submit, which anv_QueueSubmit broadcasts after marking
 * fences SUBMITTED.  Returns VK_SUCCESS, VK_TIMEOUT, or a device-lost
 * error if the GEM wait fails unexpectedly.
 */
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    _timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int ret;

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely timeouts <= 0.  Unfortunately, this was broken
    * for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, INT64_MAX);

   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->state) {
         case ANV_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll)
               return VK_SUCCESS;
            continue;

         case ANV_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on it until we hit a timeout.
             */
            ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
            if (ret == -1 && errno == ETIME) {
               return VK_TIMEOUT;
            } else if (ret == -1) {
               /* We don't know the real error. */
               return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
            } else {
               fence->state = ANV_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  return VK_SUCCESS;
               continue;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->state == ANV_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            /* Convert the relative timeout to an absolute deadline for
             * pthread_cond_timedwait.
             */
            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

            /* NOTE(review): CLOCK_MONOTONIC deadlines require the condvar
             * to have been created with a monotonic clock attribute —
             * presumably device->queue_submit is; verify at init.
             */
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            /* Charge the time actually slept against the caller's budget. */
            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               return VK_TIMEOUT;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

   return VK_SUCCESS;
}
1786
1787// Queue semaphore functions
1788
/* vkCreateSemaphore: create a (trivial) queue semaphore. */
VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   /* The DRM execbuffer ioctl always execute in-oder, even between different
    * rings. As such, there's nothing to do for the user space semaphore.
    */

   /* Hand back a non-null dummy handle; no per-semaphore state exists. */
   *pSemaphore = (VkSemaphore)1;

   return VK_SUCCESS;
}
1803
/* vkDestroySemaphore: nothing to free — semaphores are dummy handles with
 * no backing state (see anv_CreateSemaphore).
 */
void anv_DestroySemaphore(
    VkDevice                                    device,
    VkSemaphore                                 semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
}
1810
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001811// Event functions
1812
/* vkCreateEvent: allocate an event in the dynamic state pool.
 *
 * The event lives in GPU-visible state-pool memory so command buffers can
 * set/reset/poll it; on non-LLC parts the initial contents are flushed
 * out of the CPU cache so the GPU sees the RESET value.
 */
VkResult anv_CreateEvent(
    VkDevice                                    _device,
    const VkEventCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkEvent*                                    pEvent)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_event *event;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);

   state = anv_state_pool_alloc(&device->dynamic_state_pool,
                                sizeof(*event), 8);
   event = state.map;
   /* Keep the state handle so anv_DestroyEvent can return it to the pool. */
   event->state = state;
   /* NOTE(review): despite the field name "semaphore", this holds the
    * event's signal value; events start in the reset state.
    */
   event->semaphore = VK_EVENT_RESET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   *pEvent = anv_event_to_handle(event);

   return VK_SUCCESS;
}
1841
Jason Ekstrand05a26a62015-10-05 20:50:51 -07001842void anv_DestroyEvent(
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001843 VkDevice _device,
1844 VkEvent _event,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001845 const VkAllocationCallbacks* pAllocator)
Chad Versace68c7ef52015-07-14 09:33:47 -07001846{
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001847 ANV_FROM_HANDLE(anv_device, device, _device);
1848 ANV_FROM_HANDLE(anv_event, event, _event);
1849
Jason Ekstrand49f08ad2016-11-10 21:32:32 -08001850 if (!event)
1851 return;
1852
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001853 anv_state_pool_free(&device->dynamic_state_pool, event->state);
Chad Versace68c7ef52015-07-14 09:33:47 -07001854}
1855
VkResult anv_GetEventStatus(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   if (!device->info.has_llc) {
      /* The event may have been written by the GPU; flush our (possibly
       * stale) copy of the cache line and fence BEFORE reading so we pick
       * up the current value from memory.  The clflush-then-mfence order is
       * deliberate and must not be swapped.
       */
      __builtin_ia32_clflush(event);
      __builtin_ia32_mfence();

   }

   /* The semaphore field holds VK_EVENT_SET or VK_EVENT_RESET, which are
    * exactly the VkResult values this entry point must return.
    */
   return event->semaphore;
}
1872
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001873VkResult anv_SetEvent(
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001874 VkDevice _device,
1875 VkEvent _event)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001876{
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001877 ANV_FROM_HANDLE(anv_device, device, _device);
1878 ANV_FROM_HANDLE(anv_event, event, _event);
1879
1880 event->semaphore = VK_EVENT_SET;
1881
1882 if (!device->info.has_llc) {
1883 /* Make sure the writes we're flushing have landed. */
Kristian Høgsberg Kristensen0c4ef362016-01-29 12:10:12 -08001884 __builtin_ia32_mfence();
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001885 __builtin_ia32_clflush(event);
1886 }
1887
1888 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001889}
1890
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001891VkResult anv_ResetEvent(
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001892 VkDevice _device,
1893 VkEvent _event)
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001894{
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001895 ANV_FROM_HANDLE(anv_device, device, _device);
1896 ANV_FROM_HANDLE(anv_event, event, _event);
1897
1898 event->semaphore = VK_EVENT_RESET;
1899
1900 if (!device->info.has_llc) {
1901 /* Make sure the writes we're flushing have landed. */
Kristian Høgsberg Kristensen0c4ef362016-01-29 12:10:12 -08001902 __builtin_ia32_mfence();
Kristian Høgsberg Kristensenc4802bc2015-12-19 22:17:19 -08001903 __builtin_ia32_clflush(event);
1904 }
1905
1906 return VK_SUCCESS;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001907}
1908
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001909// Buffer functions
1910
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001911VkResult anv_CreateBuffer(
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001912 VkDevice _device,
1913 const VkBufferCreateInfo* pCreateInfo,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001914 const VkAllocationCallbacks* pAllocator,
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001915 VkBuffer* pBuffer)
1916{
Jason Ekstrandc8577b52015-07-08 14:24:12 -07001917 ANV_FROM_HANDLE(anv_device, device, _device);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001918 struct anv_buffer *buffer;
1919
1920 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1921
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001922 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001923 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001924 if (buffer == NULL)
1925 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1926
1927 buffer->size = pCreateInfo->size;
Jason Ekstrand783a2112015-12-14 16:51:12 -08001928 buffer->usage = pCreateInfo->usage;
Kristian Høgsberg099faa12015-05-11 22:19:58 -07001929 buffer->bo = NULL;
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001930 buffer->offset = 0;
1931
Jason Ekstrand098209e2015-07-09 18:41:27 -07001932 *pBuffer = anv_buffer_to_handle(buffer);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001933
1934 return VK_SUCCESS;
1935}
1936
Jason Ekstrand05a26a62015-10-05 20:50:51 -07001937void anv_DestroyBuffer(
Chad Versacee93b6d82015-07-14 09:47:45 -07001938 VkDevice _device,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001939 VkBuffer _buffer,
1940 const VkAllocationCallbacks* pAllocator)
Chad Versacee93b6d82015-07-14 09:47:45 -07001941{
1942 ANV_FROM_HANDLE(anv_device, device, _device);
1943 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1944
Jason Ekstrand49f08ad2016-11-10 21:32:32 -08001945 if (!buffer)
1946 return;
1947
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001948 vk_free2(&device->alloc, pAllocator, buffer);
Chad Versacee93b6d82015-07-14 09:47:45 -07001949}
1950
/* Fill a surface-state entry describing a buffer view.
 *
 * state:  pre-allocated surface-state slot to write into (state.map is the
 *         CPU-visible pointer).
 * format: isl format for the buffer's elements.
 * offset: byte address programmed into the surface (relative addressing is
 *         handled by the caller/relocations).
 * range:  size in bytes visible through the surface.
 * stride: element pitch in bytes.
 */
void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
                              enum isl_format format,
                              uint32_t offset, uint32_t range, uint32_t stride)
{
   isl_buffer_fill_state(&device->isl_dev, state.map,
                         .address = offset,
                         .mocs = device->default_mocs,
                         .size = range,
                         .format = format,
                         .stride = stride);

   /* On non-LLC platforms the GPU won't snoop the CPU cache; flush the
    * freshly written state out to memory.
    */
   if (!device->info.has_llc)
      anv_state_clflush(state);
}
1966
Jason Ekstrand05a26a62015-10-05 20:50:51 -07001967void anv_DestroySampler(
Chad Versaceec5e2f42015-07-14 10:34:00 -07001968 VkDevice _device,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001969 VkSampler _sampler,
1970 const VkAllocationCallbacks* pAllocator)
Chad Versaceec5e2f42015-07-14 10:34:00 -07001971{
1972 ANV_FROM_HANDLE(anv_device, device, _device);
1973 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1974
Jason Ekstrand49f08ad2016-11-10 21:32:32 -08001975 if (!sampler)
1976 return;
1977
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001978 vk_free2(&device->alloc, pAllocator, sampler);
Chad Versaceec5e2f42015-07-14 10:34:00 -07001979}
1980
Kristian Høgsberg454345d2015-05-17 16:33:48 -07001981VkResult anv_CreateFramebuffer(
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001982 VkDevice _device,
1983 const VkFramebufferCreateInfo* pCreateInfo,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001984 const VkAllocationCallbacks* pAllocator,
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001985 VkFramebuffer* pFramebuffer)
1986{
Jason Ekstrandc95f9b62015-07-09 18:20:10 -07001987 ANV_FROM_HANDLE(anv_device, device, _device);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001988 struct anv_framebuffer *framebuffer;
1989
1990 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1991
Jason Ekstrand84783502015-07-10 20:18:52 -07001992 size_t size = sizeof(*framebuffer) +
Chad Versaced4446a72015-10-06 11:42:43 -07001993 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
Dave Airlie1ae6ece2016-10-14 13:31:35 +10001994 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08001995 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07001996 if (framebuffer == NULL)
1997 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1998
Jason Ekstrand84783502015-07-10 20:18:52 -07001999 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2000 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
Chad Versace6dea1a92015-10-07 07:30:52 -07002001 VkImageView _iview = pCreateInfo->pAttachments[i];
Chad Versaced4446a72015-10-06 11:42:43 -07002002 framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07002003 }
2004
Kristian Høgsberg769785c2015-05-08 22:32:37 -07002005 framebuffer->width = pCreateInfo->width;
2006 framebuffer->height = pCreateInfo->height;
2007 framebuffer->layers = pCreateInfo->layers;
2008
Jason Ekstrand098209e2015-07-09 18:41:27 -07002009 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
Kristian Høgsberg769785c2015-05-08 22:32:37 -07002010
2011 return VK_SUCCESS;
2012}
2013
Jason Ekstrand05a26a62015-10-05 20:50:51 -07002014void anv_DestroyFramebuffer(
Chad Versace08f77312015-07-14 10:59:30 -07002015 VkDevice _device,
Jason Ekstrandfcfb4042015-12-02 03:28:27 -08002016 VkFramebuffer _fb,
2017 const VkAllocationCallbacks* pAllocator)
Chad Versace08f77312015-07-14 10:59:30 -07002018{
2019 ANV_FROM_HANDLE(anv_device, device, _device);
2020 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2021
Jason Ekstrand49f08ad2016-11-10 21:32:32 -08002022 if (!fb)
2023 return;
2024
Dave Airlie1ae6ece2016-10-14 13:31:35 +10002025 vk_free2(&device->alloc, pAllocator, fb);
Chad Versace08f77312015-07-14 10:59:30 -07002026}