blob: fc1fd4e7520de732910d3c7f2d4e5edaa09eed26 [file] [log] [blame]
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001/*
2 * Copyright © 2007, 2011, 2013, 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#ifndef ANDROID
30#define _GNU_SOURCE
31#else
32#include <libgen.h>
33#endif
34#include <stdio.h>
35#include <fcntl.h>
36#include <sys/stat.h>
37#include <sys/ioctl.h>
38#include <string.h>
39#include <sys/mman.h>
40#include <signal.h>
41#include <pciaccess.h>
42#include <getopt.h>
43#include <stdlib.h>
44#include <unistd.h>
45#include <sys/wait.h>
46#include <sys/types.h>
47#include <sys/syscall.h>
48#include <sys/utsname.h>
49#include <termios.h>
Daniel Vetter254f19b2014-03-22 21:29:01 +010050#include <errno.h>
Daniel Vetter766c5bc2014-03-11 22:58:07 +010051
52#include "drmtest.h"
53#include "i915_drm.h"
54#include "intel_chipset.h"
Daniel Vetterc03c6ce2014-03-22 21:34:29 +010055#include "intel_io.h"
Daniel Vetter766c5bc2014-03-11 22:58:07 +010056#include "igt_debugfs.h"
Daniel Vetter766c5bc2014-03-11 22:58:07 +010057#include "config.h"
58
59#include "ioctl_wrappers.h"
60
Daniel Vetter556c49f2014-03-11 23:27:06 +010061/**
62 * SECTION:ioctl_wrappers
63 * @short_description: ioctl wrappers and related functions
64 * @title: ioctl wrappers
Thomas Woodf0381d12015-09-07 09:26:01 +010065 * @include: igt.h
Daniel Vettercd6d5a62014-03-22 19:35:40 +010066 *
Daniel Vetter556c49f2014-03-11 23:27:06 +010067 * This helper library contains simple functions to wrap the raw drm/i915 kernel
68 * ioctls. The normal versions never pass any error codes to the caller and use
69 * igt_assert() to check for error conditions instead. For some ioctls raw
70 * wrappers which do pass on error codes are available. These raw wrappers have
71 * a __ prefix.
72 *
73 * For wrappers which check for feature bits there can also be two versions: The
74 * normal one simply returns a boolean to the caller. But when skipping the
75 * testcase entirely is the right action then it's better to use igt_skip()
76 * directly in the wrapper. Such functions have _require_ in their name to
77 * distinguish them.
78 */
79
80/**
81 * gem_handle_to_libdrm_bo:
82 * @bufmgr: libdrm buffer manager instance
83 * @fd: open i915 drm file descriptor
84 * @name: buffer name in libdrm
85 * @handle: gem buffer object handle
86 *
87 * This helper function imports a raw gem buffer handle into the libdrm buffer
88 * manager.
89 *
90 * Returns: The imported libdrm buffer manager object.
91 */
92drm_intel_bo *
93gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
94{
95 struct drm_gem_flink flink;
96 int ret;
97 drm_intel_bo *bo;
98
Chris Wilsonacca7242014-07-21 07:57:25 +010099 memset(&flink, 0, sizeof(handle));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100100 flink.handle = handle;
101 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
102 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100103 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100104
105 bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
106 igt_assert(bo);
107
108 return bo;
109}
110
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100111/**
112 * gem_get_tiling:
113 * @fd: open i915 drm file descriptor
114 * @handle: gem buffer object handle
115 * @tiling: (out) tiling mode of the gem buffer
116 * @swizzle: (out) bit 6 swizzle mode
117 *
118 * This wraps the GET_TILING ioctl.
119 */
120void
121gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
122{
123 struct drm_i915_gem_get_tiling get_tiling;
124 int ret;
125
126 memset(&get_tiling, 0, sizeof(get_tiling));
127 get_tiling.handle = handle;
128
129 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
130 igt_assert(ret == 0);
131
132 *tiling = get_tiling.tiling_mode;
133 *swizzle = get_tiling.swizzle_mode;
134}
135
Daniel Vetter766c5bc2014-03-11 22:58:07 +0100136int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
137{
138 struct drm_i915_gem_set_tiling st;
139 int ret;
140
141 memset(&st, 0, sizeof(st));
142 do {
143 st.handle = handle;
144 st.tiling_mode = tiling;
145 st.stride = tiling ? stride : 0;
146
147 ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
148 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
149 if (ret != 0)
150 return -errno;
151
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100152 errno = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +0100153 igt_assert(st.tiling_mode == tiling);
154 return 0;
155}
156
/**
 * gem_set_tiling:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @tiling: tiling mode bits
 * @stride: stride of the buffer when using a tiled mode, otherwise must be 0
 *
 * This wraps the SET_TILING ioctl and asserts that it succeeds.
 */
void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
{
	int err = __gem_set_tiling(fd, handle, tiling, stride);

	igt_assert(err == 0);
}
170
/* Local copy of the kernel's GEM caching uAPI — presumably carried here so the
 * tool builds against older installed kernel headers (NOTE(review): assumption).
 * The historical "CACHEING" spelling is kept for these local names. */
struct local_drm_i915_gem_caching {
	uint32_t handle;	/* gem buffer object handle */
	uint32_t caching;	/* caching mode */
};

#define LOCAL_DRM_I915_GEM_SET_CACHEING 0x2f
#define LOCAL_DRM_I915_GEM_GET_CACHEING 0x30
#define LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING \
	DRM_IOW(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_SET_CACHEING, struct local_drm_i915_gem_caching)
#define LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_GET_CACHEING, struct local_drm_i915_gem_caching)
182
183/**
184 * gem_set_caching:
185 * @fd: open i915 drm file descriptor
186 * @handle: gem buffer object handle
187 * @caching: caching mode bits
188 *
189 * This wraps the SET_CACHING ioctl. Note that this function internally calls
190 * igt_require() when SET_CACHING isn't available, hence automatically skips the
191 * test. Therefore always extract test logic which uses this into its own
192 * subtest.
193 */
194void gem_set_caching(int fd, uint32_t handle, uint32_t caching)
195{
196 struct local_drm_i915_gem_caching arg;
197 int ret;
198
Chris Wilsonacca7242014-07-21 07:57:25 +0100199 memset(&arg, 0, sizeof(arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100200 arg.handle = handle;
201 arg.caching = caching;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100202
Chris Wilson3b758392015-12-01 13:33:13 +0000203 ret = drmIoctl(fd, LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
Daniel Vetter556c49f2014-03-11 23:27:06 +0100204 igt_assert(ret == 0 || (errno == ENOTTY || errno == EINVAL));
Chris Wilson3b758392015-12-01 13:33:13 +0000205
Daniel Vetter556c49f2014-03-11 23:27:06 +0100206 igt_require(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100207 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100208}
209
210/**
211 * gem_get_caching:
212 * @fd: open i915 drm file descriptor
213 * @handle: gem buffer object handle
214 *
215 * This wraps the GET_CACHING ioctl.
216 *
217 * Returns: The current caching mode bits.
218 */
219uint32_t gem_get_caching(int fd, uint32_t handle)
220{
221 struct local_drm_i915_gem_caching arg;
222 int ret;
223
224 arg.handle = handle;
225 arg.caching = 0;
226 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING, &arg);
227 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100228 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100229
230 return arg.caching;
231}
232
233/**
234 * gem_open:
235 * @fd: open i915 drm file descriptor
236 * @name: flink buffer name
237 *
238 * This wraps the GEM_OPEN ioctl, which is used to import an flink name.
239 *
240 * Returns: gem file-private buffer handle of the open object.
241 */
242uint32_t gem_open(int fd, uint32_t name)
243{
244 struct drm_gem_open open_struct;
245 int ret;
246
Chris Wilsonacca7242014-07-21 07:57:25 +0100247 memset(&open_struct, 0, sizeof(open_struct));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100248 open_struct.name = name;
249 ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_struct);
250 igt_assert(ret == 0);
251 igt_assert(open_struct.handle != 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100252 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100253
254 return open_struct.handle;
255}
256
257/**
258 * gem_flink:
259 * @fd: open i915 drm file descriptor
260 * @handle: file-private gem buffer object handle
261 *
262 * This wraps the GEM_FLINK ioctl, which is used to export a gem buffer object
263 * into the device-global flink namespace. See gem_open() for opening such a
264 * buffer name on a different i915 drm file descriptor.
265 *
266 * Returns: The created flink buffer name.
267 */
268uint32_t gem_flink(int fd, uint32_t handle)
269{
270 struct drm_gem_flink flink;
271 int ret;
272
Chris Wilsonacca7242014-07-21 07:57:25 +0100273 memset(&flink, 0, sizeof(flink));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100274 flink.handle = handle;
275 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
276 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100277 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100278
279 return flink.name;
280}
281
282/**
283 * gem_close:
284 * @fd: open i915 drm file descriptor
285 * @handle: gem buffer object handle
286 *
287 * This wraps the GEM_CLOSE ioctl, which to release a file-private gem buffer
288 * handle.
289 */
290void gem_close(int fd, uint32_t handle)
291{
292 struct drm_gem_close close_bo;
293
Chris Wilson7b5a8182015-12-12 18:56:53 +0000294 igt_assert_neq(handle, 0);
295
Chris Wilsonacca7242014-07-21 07:57:25 +0100296 memset(&close_bo, 0, sizeof(close_bo));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100297 close_bo.handle = handle;
298 do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
299}
300
301/**
302 * gem_write:
303 * @fd: open i915 drm file descriptor
304 * @handle: gem buffer object handle
305 * @offset: offset within the buffer of the subrange
306 * @buf: pointer to the data to write into the buffer
307 * @length: size of the subrange
308 *
309 * This wraps the PWRITE ioctl, which is to upload a linear data to a subrange
310 * of a gem buffer object.
311 */
Chris Wilsonfc69bb02015-04-27 21:05:33 +0100312void gem_write(int fd, uint32_t handle, uint64_t offset, const void *buf, uint64_t length)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100313{
314 struct drm_i915_gem_pwrite gem_pwrite;
315
Chris Wilsonacca7242014-07-21 07:57:25 +0100316 memset(&gem_pwrite, 0, sizeof(gem_pwrite));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100317 gem_pwrite.handle = handle;
318 gem_pwrite.offset = offset;
319 gem_pwrite.size = length;
320 gem_pwrite.data_ptr = (uintptr_t)buf;
321 do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
322}
323
324/**
325 * gem_read:
326 * @fd: open i915 drm file descriptor
327 * @handle: gem buffer object handle
328 * @offset: offset within the buffer of the subrange
329 * @buf: pointer to the data to read into
330 * @length: size of the subrange
331 *
332 * This wraps the PREAD ioctl, which is to download a linear data to a subrange
333 * of a gem buffer object.
334 */
Chris Wilsonfc69bb02015-04-27 21:05:33 +0100335void gem_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint64_t length)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100336{
337 struct drm_i915_gem_pread gem_pread;
338
Chris Wilsonacca7242014-07-21 07:57:25 +0100339 memset(&gem_pread, 0, sizeof(gem_pread));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100340 gem_pread.handle = handle;
341 gem_pread.offset = offset;
342 gem_pread.size = length;
343 gem_pread.data_ptr = (uintptr_t)buf;
344 do_ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread);
345}
346
347/**
348 * gem_set_domain:
349 * @fd: open i915 drm file descriptor
350 * @handle: gem buffer object handle
351 * @read_domains: gem domain bits for read access
352 * @write_domain: gem domain bit for write access
353 *
354 * This wraps the SET_DOMAIN ioctl, which is used to control the coherency of
355 * the gem buffer object between the cpu and gtt mappings. It is also use to
356 * synchronize with outstanding rendering in general, but for that use-case
357 * please have a look at gem_sync().
358 */
359void gem_set_domain(int fd, uint32_t handle,
360 uint32_t read_domains, uint32_t write_domain)
361{
362 struct drm_i915_gem_set_domain set_domain;
363
Chris Wilsonacca7242014-07-21 07:57:25 +0100364 memset(&set_domain, 0, sizeof(set_domain));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100365 set_domain.handle = handle;
366 set_domain.read_domains = read_domains;
367 set_domain.write_domain = write_domain;
368
369 do_ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
370}
371
/**
 * gem_wait:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @timeout_ns: [in] time to wait, [out] remaining time (in nanoseconds)
 *
 * This function waits for outstanding rendering to complete, up to
 * the timeout_ns. If no timeout_ns is provided, the wait is indefinite and
 * only returns upon an error or when the rendering is complete.
 *
 * Returns: 0 on success, negative errno code on failure.
 */
int gem_wait(int fd, uint32_t handle, int64_t *timeout_ns)
{
	struct drm_i915_gem_wait wait;
	int ret;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	/* -1 requests an indefinite wait from the kernel */
	wait.timeout_ns = timeout_ns ? *timeout_ns : -1;
	wait.flags = 0;

	ret = 0;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		ret = -errno;

	/* report back how much of the timeout is left */
	if (timeout_ns)
		*timeout_ns = wait.timeout_ns;

	return ret;
}
401
402/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100403 * gem_sync:
404 * @fd: open i915 drm file descriptor
405 * @handle: gem buffer object handle
406 *
Chris Wilson41fe1d12015-04-13 11:54:18 +0100407 * This functions waits for outstanding rendering to complete.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100408 */
409void gem_sync(int fd, uint32_t handle)
410{
Chris Wilsonf27d2952016-02-23 17:45:49 +0000411 if (gem_wait(fd, handle, NULL))
412 gem_set_domain(fd, handle,
413 I915_GEM_DOMAIN_GTT,
414 I915_GEM_DOMAIN_GTT);
415 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100416}
417
Chris Wilsonf27d2952016-02-23 17:45:49 +0000418
Ankitprasad Sharma70c3be82015-12-02 14:54:50 +0530419bool gem_create__has_stolen_support(int fd)
420{
421 static int has_stolen_support = -1;
422 struct drm_i915_getparam gp;
423 int val = -1;
424
425 if (has_stolen_support < 0) {
426 memset(&gp, 0, sizeof(gp));
427 gp.param = 36; /* CREATE_VERSION */
428 gp.value = &val;
429
430 /* Do we have the extended gem_create_ioctl? */
431 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
432 has_stolen_support = val >= 2;
433 }
434
435 return has_stolen_support;
436}
437
/* Local copy of the v2 GEM_CREATE uAPI which adds a flags field for requesting
 * placement in stolen memory — presumably carried here so the tool builds
 * against older installed kernel headers (NOTE(review): assumption). */
struct local_i915_gem_create_v2 {
	uint64_t size;		/* requested object size in bytes */
	uint32_t handle;	/* out: new gem handle */
	uint32_t pad;
#define I915_CREATE_PLACEMENT_STOLEN (1<<0)
	uint32_t flags;		/* placement flags, see above */
};

#define LOCAL_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct local_i915_gem_create_v2)
447uint32_t __gem_create_stolen(int fd, uint64_t size)
448{
449 struct local_i915_gem_create_v2 create;
450 int ret;
451
452 memset(&create, 0, sizeof(create));
453 create.handle = 0;
454 create.size = size;
455 create.flags = I915_CREATE_PLACEMENT_STOLEN;
456 ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
457
458 if (ret < 0)
459 return 0;
460
461 errno = 0;
462 return create.handle;
463}
464
465/**
466 * gem_create_stolen:
467 * @fd: open i915 drm file descriptor
468 * @size: desired size of the buffer
469 *
470 * This wraps the new GEM_CREATE ioctl, which allocates a new gem buffer
471 * object of @size and placement in stolen memory region.
472 *
473 * Returns: The file-private handle of the created buffer object
474 */
475
476uint32_t gem_create_stolen(int fd, uint64_t size)
477{
478 struct local_i915_gem_create_v2 create;
479
480 memset(&create, 0, sizeof(create));
481 create.handle = 0;
482 create.size = size;
483 create.flags = I915_CREATE_PLACEMENT_STOLEN;
484 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
485 igt_assert(create.handle);
486
487 return create.handle;
488}
489
490
Daniel Vetter556c49f2014-03-11 23:27:06 +0100491uint32_t __gem_create(int fd, int size)
492{
493 struct drm_i915_gem_create create;
494 int ret;
495
Chris Wilsonacca7242014-07-21 07:57:25 +0100496 memset(&create, 0, sizeof(create));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100497 create.handle = 0;
498 create.size = size;
499 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
500
501 if (ret < 0)
502 return 0;
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100503
504 errno = 0;
505 return create.handle;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100506}
507
508/**
509 * gem_create:
510 * @fd: open i915 drm file descriptor
511 * @size: desired size of the buffer
512 *
513 * This wraps the GEM_CREATE ioctl, which allocates a new gem buffer object of
514 * @size.
515 *
516 * Returns: The file-private handle of the created buffer object
517 */
Chris Wilson236bab52015-04-26 11:11:55 +0100518uint32_t gem_create(int fd, uint64_t size)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100519{
520 struct drm_i915_gem_create create;
521
Chris Wilsonacca7242014-07-21 07:57:25 +0100522 memset(&create, 0, sizeof(create));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100523 create.handle = 0;
524 create.size = size;
525 do_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
526 igt_assert(create.handle);
527
528 return create.handle;
529}
530
531/**
Chris Wilsone3b68bb2016-01-23 09:44:19 +0000532 * __gem_execbuf:
533 * @fd: open i915 drm file descriptor
534 * @execbuf: execbuffer data structure
535 *
536 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
537 * run. This is allowed to fail, with -errno returned.
538 */
539int __gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
540{
541 int err = 0;
542 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
543 err = -errno;
Chris Wilson9ba9af22016-03-04 20:38:16 +0000544 errno = 0;
Chris Wilsone3b68bb2016-01-23 09:44:19 +0000545 return err;
546}
547
/**
 * gem_execbuf:
 * @fd: open i915 drm file descriptor
 * @execbuf: execbuffer data structure
 *
 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
 * run. Asserts that submission succeeds.
 */
void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int err = __gem_execbuf(fd, execbuf);

	igt_assert_eq(err, 0);
}
560
561/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300562 * __gem_mmap__gtt:
Daniel Vetter556c49f2014-03-11 23:27:06 +0100563 * @fd: open i915 drm file descriptor
564 * @handle: gem buffer object handle
565 * @size: size of the gem buffer
566 * @prot: memory protection bits as used by mmap()
567 *
568 * This functions wraps up procedure to establish a memory mapping through the
569 * GTT.
570 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300571 * Returns: A pointer to the created memory mapping, NULL on failure.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100572 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300573void *__gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100574{
575 struct drm_i915_gem_mmap_gtt mmap_arg;
576 void *ptr;
577
Chris Wilsonacca7242014-07-21 07:57:25 +0100578 memset(&mmap_arg, 0, sizeof(mmap_arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100579 mmap_arg.handle = handle;
580 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
581 return NULL;
582
583 ptr = mmap64(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
584 if (ptr == MAP_FAILED)
585 ptr = NULL;
Chris Wilsona464fb72015-01-02 16:33:29 +0530586 else
587 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100588
589 return ptr;
590}
591
/**
 * gem_mmap__gtt:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @size: size of the gem buffer
 * @prot: memory protection bits as used by mmap()
 *
 * Like __gem_mmap__gtt() except we assert on failure.
 *
 * Returns: A pointer to the created memory mapping
 */
void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
{
	void *map = __gem_mmap__gtt(fd, handle, size, prot);

	igt_assert(map);
	return map;
}
609
/* Local copy of the v2 GEM_MMAP uAPI which adds a flags field for requesting
 * write-combining CPU mappings — presumably carried here so the tool builds
 * against older installed kernel headers (NOTE(review): assumption). */
struct local_i915_gem_mmap_v2 {
	uint32_t handle;	/* gem buffer object handle */
	uint32_t pad;
	uint64_t offset;	/* offset into the object to map */
	uint64_t size;		/* length of the mapping */
	uint64_t addr_ptr;	/* out: userspace address of the mapping */
	uint64_t flags;		/* mapping flags, see below */
#define I915_MMAP_WC 0x1
};
#define LOCAL_IOCTL_I915_GEM_MMAP_v2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct local_i915_gem_mmap_v2)
620
621bool gem_mmap__has_wc(int fd)
622{
623 static int has_wc = -1;
624
625 if (has_wc == -1) {
626 struct drm_i915_getparam gp;
627 int val = -1;
628
629 has_wc = 0;
630
631 memset(&gp, 0, sizeof(gp));
632 gp.param = 30; /* MMAP_VERSION */
633 gp.value = &val;
634
635 /* Do we have the new mmap_ioctl? */
636 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
637 if (val >= 1) {
638 struct local_i915_gem_mmap_v2 arg;
639
640 /* Does this device support wc-mmaps ? */
641 memset(&arg, 0, sizeof(arg));
642 arg.handle = gem_create(fd, 4096);
643 arg.offset = 0;
644 arg.size = 4096;
645 arg.flags = I915_MMAP_WC;
646 has_wc = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &arg) == 0;
647 gem_close(fd, arg.handle);
648 }
649 errno = 0;
650 }
651
652 return has_wc > 0;
653}
654
655/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300656 * __gem_mmap__wc:
Chris Wilsona464fb72015-01-02 16:33:29 +0530657 * @fd: open i915 drm file descriptor
658 * @handle: gem buffer object handle
Thomas Woodd01ebbd2015-06-29 16:47:14 +0100659 * @offset: offset in the gem buffer of the mmap arena
Chris Wilsona464fb72015-01-02 16:33:29 +0530660 * @size: size of the mmap arena
661 * @prot: memory protection bits as used by mmap()
662 *
663 * This functions wraps up procedure to establish a memory mapping through
Chris Wilsoneaa1e8e2015-01-06 10:06:41 +0000664 * direct cpu access, bypassing the gpu and cpu caches completely and also
665 * bypassing the GTT system agent (i.e. there is no automatic tiling of
666 * the mmapping through the fence registers).
Chris Wilsona464fb72015-01-02 16:33:29 +0530667 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300668 * Returns: A pointer to the created memory mapping, NULL on failure.
Chris Wilsona464fb72015-01-02 16:33:29 +0530669 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300670void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
Chris Wilsona464fb72015-01-02 16:33:29 +0530671{
672 struct local_i915_gem_mmap_v2 arg;
673
674 if (!gem_mmap__has_wc(fd)) {
675 errno = ENOSYS;
676 return NULL;
677 }
678
679 memset(&arg, 0, sizeof(arg));
680 arg.handle = handle;
681 arg.offset = offset;
682 arg.size = size;
683 arg.flags = I915_MMAP_WC;
684 if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &arg))
685 return NULL;
686
687 errno = 0;
688 return (void *)(uintptr_t)arg.addr_ptr;
689}
690
/**
 * gem_mmap__wc:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @offset: offset in the gem buffer of the mmap arena
 * @size: size of the mmap arena
 * @prot: memory protection bits as used by mmap()
 *
 * Like __gem_mmap__wc() except we assert on failure.
 *
 * Returns: A pointer to the created memory mapping
 */
void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
{
	void *map = __gem_mmap__wc(fd, handle, offset, size, prot);

	igt_assert(map);
	return map;
}
709
710/**
711 * __gem_mmap__cpu:
Daniel Vetter556c49f2014-03-11 23:27:06 +0100712 * @fd: open i915 drm file descriptor
713 * @handle: gem buffer object handle
Thomas Woodd01ebbd2015-06-29 16:47:14 +0100714 * @offset: offset in the gem buffer of the mmap arena
Chris Wilson6fff1f82014-11-04 12:06:17 +0000715 * @size: size of the mmap arena
Daniel Vetter556c49f2014-03-11 23:27:06 +0100716 * @prot: memory protection bits as used by mmap()
717 *
718 * This functions wraps up procedure to establish a memory mapping through
719 * direct cpu access, bypassing the gpu completely.
720 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300721 * Returns: A pointer to the created memory mapping, NULL on failure.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100722 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300723void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100724{
725 struct drm_i915_gem_mmap mmap_arg;
726
Chris Wilsonacca7242014-07-21 07:57:25 +0100727 memset(&mmap_arg, 0, sizeof(mmap_arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100728 mmap_arg.handle = handle;
Chris Wilson6fff1f82014-11-04 12:06:17 +0000729 mmap_arg.offset = offset;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100730 mmap_arg.size = size;
731 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
732 return NULL;
733
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100734 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100735 return (void *)(uintptr_t)mmap_arg.addr_ptr;
736}
737
/**
 * gem_mmap__cpu:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @offset: offset in the gem buffer of the mmap arena
 * @size: size of the mmap arena
 * @prot: memory protection bits as used by mmap()
 *
 * Like __gem_mmap__cpu() except we assert on failure.
 *
 * Returns: A pointer to the created memory mapping
 */
void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
{
	void *map = __gem_mmap__cpu(fd, handle, offset, size, prot);

	igt_assert(map);
	return map;
}
756
757/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100758 * gem_madvise:
759 * @fd: open i915 drm file descriptor
760 * @handle: gem buffer object handle
761 * @state: desired madvise state
762 *
763 * This is a wraps the MADVISE ioctl, which is used in libdrm to implement
764 * opportunistic buffer object caching. Objects in the cache are set to DONTNEED
765 * (internally in the kernel tracked as purgeable objects). When such a cached
766 * object is in need again it must be set back to WILLNEED before first use.
767 *
768 * Returns: When setting the madvise state to WILLNEED this returns whether the
Thomas Wood519f3772014-09-26 14:24:52 +0100769 * backing storage was still available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100770 */
771int gem_madvise(int fd, uint32_t handle, int state)
772{
773 struct drm_i915_gem_madvise madv;
774
Chris Wilsonacca7242014-07-21 07:57:25 +0100775 memset(&madv, 0, sizeof(madv));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100776 madv.handle = handle;
777 madv.madv = state;
778 madv.retained = 1;
779 do_ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
780
781 return madv.retained;
782}
783
784/**
785 * gem_context_create:
786 * @fd: open i915 drm file descriptor
787 *
788 * This is a wraps the CONTEXT_CREATE ioctl, which is used to allocate a new
789 * hardware context. Not that similarly to gem_set_caching() this wrapper calls
790 * igt_require() internally to correctly skip on kernels and platforms where hw
791 * context support is not available.
792 *
793 * Returns: The id of the allocated hw context.
794 */
795uint32_t gem_context_create(int fd)
796{
797 struct drm_i915_gem_context_create create;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100798
Chris Wilsonacca7242014-07-21 07:57:25 +0100799 memset(&create, 0, sizeof(create));
Chris Wilsonffbc59a2016-02-05 11:16:18 +0000800 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create)) {
801 int err = -errno;
802 igt_skip_on(err == -ENODEV || errno == -EINVAL);
803 igt_assert_eq(err, 0);
804 }
805 igt_assert(create.ctx_id != 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100806 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100807
808 return create.ctx_id;
809}
810
Daniel Vetter09b82112015-02-06 17:15:13 +0100811int __gem_context_destroy(int fd, uint32_t ctx_id)
812{
813 struct drm_i915_gem_context_destroy destroy;
814 int ret;
815
816 memset(&destroy, 0, sizeof(destroy));
817 destroy.ctx_id = ctx_id;
818
819 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
820 if (ret)
821 return -errno;
822 return 0;
823}
824
825/**
Thomas Wooda22548f2015-02-16 11:17:11 +0000826 * gem_context_destroy:
Daniel Vetter09b82112015-02-06 17:15:13 +0100827 * @fd: open i915 drm file descriptor
828 * @ctx_id: i915 hw context id
829 *
830 * This is a wraps the CONTEXT_DESTROY ioctl, which is used to free a hardware
831 * context.
832 */
833void gem_context_destroy(int fd, uint32_t ctx_id)
834{
835 struct drm_i915_gem_context_destroy destroy;
836
837 memset(&destroy, 0, sizeof(destroy));
838 destroy.ctx_id = ctx_id;
839
840 do_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
841}
842
/**
 * gem_context_get_param:
 * @fd: open i915 drm file descriptor
 * @p: i915 hw context parameter
 *
 * This wraps the CONTEXT_GET_PARAM ioctl, which is used to query a hardware
 * context parameter. Asserts that the ioctl succeeds.
 */
void gem_context_get_param(int fd, struct local_i915_gem_context_param *p)
{
/* Local ioctl number/definition — presumably because installed kernel headers
 * may predate the context-param uAPI (NOTE(review): assumption). */
#define LOCAL_I915_GEM_CONTEXT_GETPARAM 0x34
#define LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_CONTEXT_GETPARAM, struct local_i915_gem_context_param)
	do_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, p);
}
859
/* Raw CONTEXT_SET_PARAM wrapper: returns 0 on success or -errno on failure
 * instead of asserting. */
int __gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
{
/* Local ioctl number/definition — presumably because installed kernel headers
 * may predate the context-param uAPI (NOTE(review): assumption). */
#define LOCAL_I915_GEM_CONTEXT_SETPARAM 0x35
#define LOCAL_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_CONTEXT_SETPARAM, struct local_i915_gem_context_param)
	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_SETPARAM, p))
		return -errno;

	errno = 0;
	return 0;
}
/**
 * gem_context_set_param:
 * @fd: open i915 drm file descriptor
 * @p: i915 hw context parameter
 *
 * This wraps the CONTEXT_SET_PARAM ioctl, which is used to set a hardware
 * context parameter. Asserts that the ioctl succeeds.
 */
void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
{
	int err = __gem_context_set_param(fd, p);

	igt_assert(err == 0);
}
884
885/**
Thomas Wood26f40812015-02-20 11:31:01 +0000886 * gem_context_require_param:
Daniel Vetter75c075c2015-02-06 17:13:59 +0100887 * @fd: open i915 drm file descriptor
Thomas Wood3b8e1212015-03-12 17:01:57 +0000888 * @param: i915 hw context parameter
Daniel Vetter75c075c2015-02-06 17:13:59 +0100889 *
890 * Feature test macro to query whether hw context parameter support for @param
891 * is available. Automatically skips through igt_require() if not.
892 */
893void gem_context_require_param(int fd, uint64_t param)
894{
895 struct local_i915_gem_context_param p;
896
897 p.context = 0;
898 p.param = param;
899 p.value = 0;
900 p.size = 0;
901
902 igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
903}
904
Chris Wilsonfb950bc2015-04-13 19:04:13 +0100905void gem_context_require_ban_period(int fd)
906{
907 static int has_ban_period = -1;
908
909 if (has_ban_period < 0) {
910 struct local_i915_gem_context_param p;
911
912 p.context = 0;
913 p.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
914 p.value = 0;
915 p.size = 0;
916
917 has_ban_period = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0;
918 }
919
920 igt_require(has_ban_period);
921}
922
Tiago Vignattie1f663b2015-08-12 15:57:12 -0300923int __gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t flags, uint32_t *handle)
924{
925 struct local_i915_gem_userptr userptr;
926 int ret;
927
928 memset(&userptr, 0, sizeof(userptr));
929 userptr.user_ptr = (uintptr_t)ptr;
930 userptr.user_size = size;
931 userptr.flags = flags;
932 if (read_only)
933 userptr.flags |= LOCAL_I915_USERPTR_READ_ONLY;
934
935 ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
936 if (ret)
937 ret = errno;
938 igt_skip_on_f(ret == ENODEV &&
939 (flags & LOCAL_I915_USERPTR_UNSYNCHRONIZED) == 0 &&
940 !read_only,
941 "Skipping, synchronized mappings with no kernel CONFIG_MMU_NOTIFIER?");
942 if (ret == 0)
943 *handle = userptr.handle;
944
945 return ret;
946}
947
948/**
949 * gem_userptr:
950 * @fd: open i915 drm file descriptor
951 * @ptr: userptr pointer to be passed
952 * @size: desired size of the buffer
953 * @read_only: specify whether userptr is opened read only
954 * @flags: other userptr flags
955 * @handle: returned handle for the object
956 *
957 * Returns userptr handle for the GEM object.
958 */
959void gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t flags, uint32_t *handle)
960{
961 igt_assert_eq(__gem_userptr(fd, ptr, size, read_only, flags, handle), 0);
962}
963
Daniel Vetter75c075c2015-02-06 17:13:59 +0100964/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100965 * gem_sw_finish:
966 * @fd: open i915 drm file descriptor
967 * @handle: gem buffer object handle
968 *
969 * This is a wraps the SW_FINISH ioctl, which is used to flush out frontbuffer
970 * rendering done through the direct cpu memory mappings. Shipping userspace
971 * does _not_ call this after frontbuffer rendering through gtt memory mappings.
972 */
973void gem_sw_finish(int fd, uint32_t handle)
974{
975 struct drm_i915_gem_sw_finish finish;
976
Chris Wilsonacca7242014-07-21 07:57:25 +0100977 memset(&finish, 0, sizeof(finish));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100978 finish.handle = handle;
979
980 do_ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
981}
982
983/**
984 * gem_bo_busy:
985 * @fd: open i915 drm file descriptor
986 * @handle: gem buffer object handle
987 *
988 * This is a wraps the BUSY ioctl, which tells whether a buffer object is still
989 * actively used by the gpu in a execbuffer.
990 *
991 * Returns: The busy state of the buffer object.
992 */
993bool gem_bo_busy(int fd, uint32_t handle)
994{
995 struct drm_i915_gem_busy busy;
996
Chris Wilsonacca7242014-07-21 07:57:25 +0100997 memset(&busy, 0, sizeof(busy));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100998 busy.handle = handle;
999
1000 do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
1001
1002 return !!busy.busy;
1003}
1004
1005
1006/* feature test helpers */
1007
1008/**
Michał Winiarski52b5d502016-01-25 19:35:01 +01001009 * gem_gtt_type:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001010 * @fd: open i915 drm file descriptor
1011 *
Michał Winiarski52b5d502016-01-25 19:35:01 +01001012 * Feature test macro to check what type of gtt is being used by the kernel:
1013 * 0 - global gtt
1014 * 1 - aliasing ppgtt
1015 * 2 - full ppgtt, limited to 32bit address space
1016 * 3 - full ppgtt, 64bit address space
Daniel Vetter556c49f2014-03-11 23:27:06 +01001017 *
Michał Winiarski52b5d502016-01-25 19:35:01 +01001018 * Returns: Type of gtt being used.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001019 */
Michał Winiarski52b5d502016-01-25 19:35:01 +01001020int gem_gtt_type(int fd)
Daniel Vetter556c49f2014-03-11 23:27:06 +01001021{
1022 struct drm_i915_getparam gp;
Chris Wilsonacca7242014-07-21 07:57:25 +01001023 int val = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001024
Chris Wilsonacca7242014-07-21 07:57:25 +01001025 memset(&gp, 0, sizeof(gp));
Daniel Vetter556c49f2014-03-11 23:27:06 +01001026 gp.param = 18; /* HAS_ALIASING_PPGTT */
1027 gp.value = &val;
1028
1029 if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
1030 return 0;
1031
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001032 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001033 return val;
1034}
1035
1036/**
Michał Winiarski52b5d502016-01-25 19:35:01 +01001037 * gem_uses_ppgtt:
1038 * @fd: open i915 drm file descriptor
1039 *
1040 * Feature test macro to check whether the kernel internally uses ppgtt to
1041 * execute batches. Note that this is also true when we're using full ppgtt.
1042 *
1043 * Returns: Whether batches are run through ppgtt.
1044 */
1045bool gem_uses_ppgtt(int fd)
1046{
1047 return gem_gtt_type(fd) > 0;
1048}
1049
1050/**
1051 * gem_uses_full_ppgtt:
1052 * @fd: open i915 drm file descriptor
1053 *
1054 * Feature test macro to check whether the kernel internally uses full
1055 * per-process gtt to execute batches. Note that this is also true when we're
1056 * using full 64b ppgtt.
1057 *
1058 * Returns: Whether batches are run through full ppgtt.
1059 */
1060bool gem_uses_full_ppgtt(int fd)
1061{
1062 return gem_gtt_type(fd) > 1;
1063}
1064
1065/**
Thomas Woodae3a9462014-11-25 11:59:37 +00001066 * gem_available_fences:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001067 * @fd: open i915 drm file descriptor
1068 *
1069 * Feature test macro to query the kernel for the number of available fences
Thomas Wood519f3772014-09-26 14:24:52 +01001070 * usable in a batchbuffer. Only relevant for pre-gen4.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001071 *
1072 * Returns: The number of available fences.
1073 */
1074int gem_available_fences(int fd)
1075{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001076 static int num_fences = -1;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001077
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001078 if (num_fences < 0) {
1079 struct drm_i915_getparam gp;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001080
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001081 memset(&gp, 0, sizeof(gp));
1082 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
1083 gp.value = &num_fences;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001084
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001085 num_fences = 0;
1086 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1087 errno = 0;
1088 }
1089
1090 return num_fences;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001091}
1092
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001093bool gem_has_llc(int fd)
1094{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001095 static int has_llc = -1;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001096
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001097 if (has_llc < 0) {
1098 struct drm_i915_getparam gp;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001099
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001100 memset(&gp, 0, sizeof(gp));
1101 gp.param = I915_PARAM_HAS_LLC;
1102 gp.value = &has_llc;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001103
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001104 has_llc = 0;
1105 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1106 errno = 0;
1107 }
1108
1109 return has_llc;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001110}
1111
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001112static bool has_param(int fd, int param)
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001113{
1114 drm_i915_getparam_t gp;
Chris Wilsonacca7242014-07-21 07:57:25 +01001115 int tmp = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001116
Chris Wilsonacca7242014-07-21 07:57:25 +01001117 memset(&gp, 0, sizeof(gp));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001118 gp.value = &tmp;
1119 gp.param = param;
1120
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001121 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001122 return false;
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001123
1124 errno = 0;
1125 return tmp > 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001126}
1127
Daniel Vetter556c49f2014-03-11 23:27:06 +01001128/**
1129 * gem_has_bsd:
1130 * @fd: open i915 drm file descriptor
1131 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001132 * Feature test macro to query whether the BSD ring is available.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001133 *
1134 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
1135 *
Thomas Wood519f3772014-09-26 14:24:52 +01001136 * Returns: Whether the BSD ring is available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001137 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001138bool gem_has_bsd(int fd)
1139{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001140 static int has_bsd = -1;
1141 if (has_bsd < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001142 has_bsd = has_param(fd, I915_PARAM_HAS_BSD);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001143 return has_bsd;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001144}
1145
Daniel Vetter556c49f2014-03-11 23:27:06 +01001146/**
1147 * gem_has_blt:
1148 * @fd: open i915 drm file descriptor
1149 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001150 * Feature test macro to query whether the blitter ring is available.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001151 *
1152 * Note that recent Bspec calls this the BCS ring for Blitter Command Submission.
1153 *
Thomas Wood519f3772014-09-26 14:24:52 +01001154 * Returns: Whether the blitter ring is available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001155 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001156bool gem_has_blt(int fd)
1157{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001158 static int has_blt = -1;
1159 if (has_blt < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001160 has_blt = has_param(fd, I915_PARAM_HAS_BLT);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001161 return has_blt;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001162}
1163
#define LOCAL_I915_PARAM_HAS_VEBOX 22
/**
 * gem_has_vebox:
 * @fd: open i915 drm file descriptor
 *
 * Feature test macro to query whether the vebox ring is available.
 *
 * Note that recent Bspec calls this the VECS ring for Video Enhancement Command
 * Submission.
 *
 * Returns: Whether the vebox ring is available or not.
 */
bool gem_has_vebox(int fd)
{
	static int cached = -1;

	if (cached == -1)
		cached = has_param(fd, LOCAL_I915_PARAM_HAS_VEBOX);

	return cached;
}
1183
Zhipeng Gong17937a02015-01-13 08:50:19 +08001184#define LOCAL_I915_PARAM_HAS_BSD2 31
1185/**
1186 * gem_has_bsd2:
1187 * @fd: open i915 drm file descriptor
1188 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001189 * Feature test macro to query whether the BSD2 ring is available.
Zhipeng Gong17937a02015-01-13 08:50:19 +08001190 *
1191 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
1192 *
1193 * Returns: Whether the BSD ring is avaible or not.
1194 */
1195bool gem_has_bsd2(int fd)
1196{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001197 static int has_bsd2 = -1;
1198 if (has_bsd2 < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001199 has_bsd2 = has_param(fd, LOCAL_I915_PARAM_HAS_BSD2);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001200 return has_bsd2;
Zhipeng Gong17937a02015-01-13 08:50:19 +08001201}
Chris Wilson16038902016-02-18 10:35:10 +00001202/**
1203 * gem_available_aperture_size:
1204 * @fd: open i915 drm file descriptor
1205 *
1206 * Feature test macro to query the kernel for the available gpu aperture size
1207 * usable in a batchbuffer.
1208 *
1209 * Returns: The available gtt address space size.
1210 */
1211uint64_t gem_available_aperture_size(int fd)
1212{
1213 struct drm_i915_gem_get_aperture aperture;
1214
1215 memset(&aperture, 0, sizeof(aperture));
1216 aperture.aper_size = 256*1024*1024;
1217 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1218
1219 return aperture.aper_available_size;
1220}
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001221
Daniel Vetter556c49f2014-03-11 23:27:06 +01001222/**
1223 * gem_aperture_size:
1224 * @fd: open i915 drm file descriptor
1225 *
1226 * Feature test macro to query the kernel for the total gpu aperture size.
1227 *
1228 * Returns: The total gtt address space size.
1229 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001230uint64_t gem_aperture_size(int fd)
1231{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001232 static uint64_t aperture_size = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001233
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001234 if (aperture_size == 0) {
Chris Wilsona2271932015-10-14 14:17:55 +01001235 struct local_i915_gem_context_param p;
Chris Wilsonacca7242014-07-21 07:57:25 +01001236
Chris Wilsona2271932015-10-14 14:17:55 +01001237 memset(&p, 0, sizeof(p));
1238 p.param = 0x3;
1239 if (ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0) {
1240 aperture_size = p.value;
1241 } else {
1242 struct drm_i915_gem_get_aperture aperture;
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001243
Chris Wilsona2271932015-10-14 14:17:55 +01001244 memset(&aperture, 0, sizeof(aperture));
1245 aperture.aper_size = 256*1024*1024;
1246
1247 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1248 aperture_size = aperture.aper_size;
1249 }
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001250 }
1251
1252 return aperture_size;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001253}
1254
Daniel Vetter556c49f2014-03-11 23:27:06 +01001255/**
Thomas Woodae3a9462014-11-25 11:59:37 +00001256 * gem_mappable_aperture_size:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001257 *
1258 * Feature test macro to query the kernel for the mappable gpu aperture size.
Thomas Wood519f3772014-09-26 14:24:52 +01001259 * This is the area available for GTT memory mappings.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001260 *
1261 * Returns: The mappable gtt address space size.
1262 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001263uint64_t gem_mappable_aperture_size(void)
1264{
Chris Wilsonacca7242014-07-21 07:57:25 +01001265 struct pci_device *pci_dev = intel_get_pci_device();
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001266 int bar;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001267
1268 if (intel_gen(pci_dev->device_id) < 3)
1269 bar = 0;
1270 else
1271 bar = 2;
1272
1273 return pci_dev->regions[bar].size;
1274}
1275
Chris Wilson391b32c2016-02-05 18:35:21 +00001276/**
1277 * gem_global_aperture_size:
1278 *
1279 * Feature test macro to query the kernel for the global gpu aperture size.
1280 * This is the area available for the kernel to perform address translations.
1281 *
1282 * Returns: The mappable gtt address space size.
1283 */
1284uint64_t gem_global_aperture_size(int fd)
1285{
1286 struct drm_i915_gem_get_aperture aperture;
1287
1288 memset(&aperture, 0, sizeof(aperture));
1289 aperture.aper_size = 256*1024*1024;
1290 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1291
1292 return aperture.aper_size;
1293}
1294
Michał Winiarskie6ca4bd2016-01-25 19:35:02 +01001295#define LOCAL_I915_PARAM_HAS_EXEC_SOFTPIN 37
1296/**
1297 * gem_has_softpin:
1298 * @fd: open i915 drm file descriptor
1299 *
1300 * Feature test macro to query whether the softpinning functionality is
1301 * supported.
1302 *
1303 * Returns: Whether softpin support is available
1304 */
1305bool gem_has_softpin(int fd)
1306{
1307 static int has_softpin = -1;
1308
1309 if (has_softpin < 0) {
1310 struct drm_i915_getparam gp;
1311
1312 memset(&gp, 0, sizeof(gp));
1313 gp.param = LOCAL_I915_PARAM_HAS_EXEC_SOFTPIN;
1314 gp.value = &has_softpin;
1315
1316 has_softpin = 0;
1317 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1318 errno = 0;
1319 }
1320
1321 return has_softpin;
1322}
1323
Daniel Vetter556c49f2014-03-11 23:27:06 +01001324/**
1325 * gem_require_caching:
1326 * @fd: open i915 drm file descriptor
1327 *
1328 * Feature test macro to query whether buffer object caching control is
1329 * available. Automatically skips through igt_require() if not.
1330 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001331void gem_require_caching(int fd)
1332{
1333 struct local_drm_i915_gem_caching arg;
1334 int ret;
1335
Chris Wilsonacca7242014-07-21 07:57:25 +01001336 memset(&arg, 0, sizeof(arg));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001337 arg.handle = gem_create(fd, 4096);
1338 igt_assert(arg.handle != 0);
1339
1340 arg.caching = 0;
1341 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
1342 gem_close(fd, arg.handle);
1343
1344 igt_require(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001345 errno = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001346}
1347
Chris Wilson60eafc52016-03-04 09:40:51 +00001348bool gem_has_ring(int fd, unsigned ring)
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001349{
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001350 struct drm_i915_gem_execbuffer2 execbuf;
1351 struct drm_i915_gem_exec_object2 exec;
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001352
Chris Wilson60eafc52016-03-04 09:40:51 +00001353 /* silly ABI, the kernel thinks everyone who has BSD also has BSD2 */
1354 if ((ring & ~(3<<13)) == I915_EXEC_BSD) {
1355 if (ring & (3 << 13) && !gem_has_bsd2(fd))
1356 return false;
1357 }
1358
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001359 memset(&exec, 0, sizeof(exec));
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001360 memset(&execbuf, 0, sizeof(execbuf));
1361 execbuf.buffers_ptr = (uintptr_t)&exec;
1362 execbuf.buffer_count = 1;
1363 execbuf.flags = ring;
Chris Wilson9ba9af22016-03-04 20:38:16 +00001364 return __gem_execbuf(fd, &execbuf) == -ENOENT;
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001365}
1366
Daniel Vetter556c49f2014-03-11 23:27:06 +01001367/**
1368 * gem_require_ring:
1369 * @fd: open i915 drm file descriptor
Chris Wilson60eafc52016-03-04 09:40:51 +00001370 * @ring: ring flag bit as used in gem_execbuf()
Daniel Vetter556c49f2014-03-11 23:27:06 +01001371 *
1372 * Feature test macro to query whether a specific ring is available.
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001373 * This automagically skips if the ring isn't available by
1374 * calling igt_require().
Daniel Vetter556c49f2014-03-11 23:27:06 +01001375 */
Chris Wilson60eafc52016-03-04 09:40:51 +00001376void gem_require_ring(int fd, unsigned ring)
Daniel Vetter556c49f2014-03-11 23:27:06 +01001377{
Chris Wilson60eafc52016-03-04 09:40:51 +00001378 igt_require(gem_has_ring(fd, ring));
Daniel Vetter556c49f2014-03-11 23:27:06 +01001379}
1380
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001381/* prime */
Daniel Vetter556c49f2014-03-11 23:27:06 +01001382
1383/**
1384 * prime_handle_to_fd:
1385 * @fd: open i915 drm file descriptor
1386 * @handle: file-private gem buffer object handle
1387 *
1388 * This wraps the PRIME_HANDLE_TO_FD ioctl, which is used to export a gem buffer
1389 * object into a global (i.e. potentially cross-device) dma-buf file-descriptor
1390 * handle.
1391 *
1392 * Returns: The created dma-buf fd handle.
1393 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001394int prime_handle_to_fd(int fd, uint32_t handle)
1395{
1396 struct drm_prime_handle args;
1397
Chris Wilsonacca7242014-07-21 07:57:25 +01001398 memset(&args, 0, sizeof(args));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001399 args.handle = handle;
1400 args.flags = DRM_CLOEXEC;
1401 args.fd = -1;
1402
1403 do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
1404
1405 return args.fd;
1406}
1407
Daniel Vetter556c49f2014-03-11 23:27:06 +01001408/**
Tiago Vignatti4edfa092015-07-29 18:26:29 -03001409 * prime_handle_to_fd_for_mmap:
1410 * @fd: open i915 drm file descriptor
1411 * @handle: file-private gem buffer object handle
1412 *
1413 * Same as prime_handle_to_fd above but with DRM_RDWR capabilities, which can
1414 * be useful for writing into the mmap'ed dma-buf file-descriptor.
1415 *
1416 * Returns: The created dma-buf fd handle or -1 if the ioctl fails.
1417 */
1418int prime_handle_to_fd_for_mmap(int fd, uint32_t handle)
1419{
1420 struct drm_prime_handle args;
1421
1422 memset(&args, 0, sizeof(args));
1423 args.handle = handle;
1424 args.flags = DRM_CLOEXEC | DRM_RDWR;
1425 args.fd = -1;
1426
1427 if (drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0)
1428 return -1;
1429
1430 return args.fd;
1431}
1432
1433/**
Daniel Vetter556c49f2014-03-11 23:27:06 +01001434 * prime_fd_to_handle:
1435 * @fd: open i915 drm file descriptor
1436 * @dma_buf_fd: dma-buf fd handle
1437 *
1438 * This wraps the PRIME_FD_TO_HANDLE ioctl, which is used to import a dma-buf
1439 * file-descriptor into a gem buffer object.
1440 *
1441 * Returns: The created gem buffer object handle.
1442 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001443uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
1444{
1445 struct drm_prime_handle args;
1446
Chris Wilsonacca7242014-07-21 07:57:25 +01001447 memset(&args, 0, sizeof(args));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001448 args.fd = dma_buf_fd;
1449 args.flags = 0;
1450 args.handle = 0;
1451
1452 do_ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
1453
1454 return args.handle;
1455}
1456
Daniel Vetter556c49f2014-03-11 23:27:06 +01001457/**
1458 * prime_get_size:
1459 * @dma_buf_fd: dma-buf fd handle
1460 *
1461 * This wraps the lseek() protocol used to query the invariant size of a
1462 * dma-buf. Not all kernels support this, which is check with igt_require() and
1463 * so will result in automagic test skipping.
1464 *
1465 * Returns: The lifetime-invariant size of the dma-buf object.
1466 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001467off_t prime_get_size(int dma_buf_fd)
1468{
1469 off_t ret;
Chris Wilsonacca7242014-07-21 07:57:25 +01001470
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001471 ret = lseek(dma_buf_fd, 0, SEEK_END);
1472 igt_assert(ret >= 0 || errno == ESPIPE);
1473 igt_require(ret >= 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001474 errno = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001475
1476 return ret;
1477}
Tvrtko Ursulin0a087732015-03-03 14:10:54 +00001478
1479/**
Tiago Vignatti35debab2015-12-11 18:50:35 -02001480 * prime_sync_start
1481 * @dma_buf_fd: dma-buf fd handle
1482 */
Chris Wilsonaed69b52016-02-25 21:43:01 +00001483void prime_sync_start(int dma_buf_fd, bool write)
Tiago Vignatti35debab2015-12-11 18:50:35 -02001484{
1485 struct local_dma_buf_sync sync_start;
1486
1487 memset(&sync_start, 0, sizeof(sync_start));
Chris Wilsonaed69b52016-02-25 21:43:01 +00001488 sync_start.flags = LOCAL_DMA_BUF_SYNC_START;
1489 sync_start.flags |= LOCAL_DMA_BUF_SYNC_READ;
1490 if (write)
1491 sync_start.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
Tiago Vignatti35debab2015-12-11 18:50:35 -02001492 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_start);
1493}
1494
1495/**
1496 * prime_sync_end
1497 * @dma_buf_fd: dma-buf fd handle
1498 */
Chris Wilsonaed69b52016-02-25 21:43:01 +00001499void prime_sync_end(int dma_buf_fd, bool write)
Tiago Vignatti35debab2015-12-11 18:50:35 -02001500{
1501 struct local_dma_buf_sync sync_end;
1502
1503 memset(&sync_end, 0, sizeof(sync_end));
Chris Wilsonaed69b52016-02-25 21:43:01 +00001504 sync_end.flags = LOCAL_DMA_BUF_SYNC_END;
1505 sync_end.flags |= LOCAL_DMA_BUF_SYNC_READ;
1506 if (write)
1507 sync_end.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
Tiago Vignatti35debab2015-12-11 18:50:35 -02001508 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_end);
1509}
1510
1511/**
Tvrtko Ursulin0a087732015-03-03 14:10:54 +00001512 * igt_require_fb_modifiers:
1513 * @fd: Open DRM file descriptor.
1514 *
1515 * Requires presence of DRM_CAP_ADDFB2_MODIFIERS.
1516 */
1517void igt_require_fb_modifiers(int fd)
1518{
1519 static bool has_modifiers, cap_modifiers_tested;
1520
1521 if (!cap_modifiers_tested) {
1522 uint64_t cap_modifiers;
1523 int ret;
1524
1525 ret = drmGetCap(fd, LOCAL_DRM_CAP_ADDFB2_MODIFIERS, &cap_modifiers);
1526 igt_assert(ret == 0 || errno == EINVAL);
1527 has_modifiers = ret == 0 && cap_modifiers == 1;
1528 cap_modifiers_tested = true;
1529 }
1530
1531 igt_require(has_modifiers);
1532}
Tvrtko Ursulinc7bac3c2015-03-03 14:11:02 +00001533
1534int __kms_addfb(int fd, uint32_t handle, uint32_t width, uint32_t height,
1535 uint32_t stride, uint32_t pixel_format, uint64_t modifier,
1536 uint32_t flags, uint32_t *buf_id)
1537{
1538 struct local_drm_mode_fb_cmd2 f;
1539 int ret;
1540
1541 igt_require_fb_modifiers(fd);
1542
1543 memset(&f, 0, sizeof(f));
1544
1545 f.width = width;
1546 f.height = height;
1547 f.pixel_format = pixel_format;
1548 f.flags = flags;
1549 f.handles[0] = handle;
1550 f.pitches[0] = stride;
1551 f.modifier[0] = modifier;
1552
1553 ret = drmIoctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f);
1554
1555 *buf_id = f.fb_id;
1556
1557 return ret < 0 ? -errno : ret;
1558}