blob: 87511fc62a155399a26b9d5f9115102fdc4d62d2 [file] [log] [blame]
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001/*
2 * Copyright © 2007, 2011, 2013, 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
Daniel Vetter00215dd2017-09-05 14:36:05 +020029#ifdef HAVE_LIBGEN_H
Daniel Vetter766c5bc2014-03-11 22:58:07 +010030#include <libgen.h>
31#endif
32#include <stdio.h>
33#include <fcntl.h>
34#include <sys/stat.h>
35#include <sys/ioctl.h>
36#include <string.h>
37#include <sys/mman.h>
38#include <signal.h>
39#include <pciaccess.h>
40#include <getopt.h>
41#include <stdlib.h>
42#include <unistd.h>
43#include <sys/wait.h>
44#include <sys/types.h>
45#include <sys/syscall.h>
46#include <sys/utsname.h>
47#include <termios.h>
Daniel Vetter254f19b2014-03-22 21:29:01 +010048#include <errno.h>
Daniel Vetter766c5bc2014-03-11 22:58:07 +010049
50#include "drmtest.h"
51#include "i915_drm.h"
52#include "intel_chipset.h"
Daniel Vetterc03c6ce2014-03-22 21:34:29 +010053#include "intel_io.h"
Daniel Vetter766c5bc2014-03-11 22:58:07 +010054#include "igt_debugfs.h"
Daniel Vetter766c5bc2014-03-11 22:58:07 +010055#include "config.h"
56
Maarten Lankhorstd930b642017-02-09 10:42:01 +010057#ifdef HAVE_VALGRIND
58#include <valgrind/valgrind.h>
59#include <valgrind/memcheck.h>
60
61#define VG(x) x
62#else
Daniel Vetter94d56aa2017-09-05 14:36:15 +020063#define VG(x) do {} while (0)
Maarten Lankhorstd930b642017-02-09 10:42:01 +010064#endif
65
Daniel Vetter766c5bc2014-03-11 22:58:07 +010066#include "ioctl_wrappers.h"
67
Daniel Vetter556c49f2014-03-11 23:27:06 +010068/**
69 * SECTION:ioctl_wrappers
70 * @short_description: ioctl wrappers and related functions
71 * @title: ioctl wrappers
Thomas Woodf0381d12015-09-07 09:26:01 +010072 * @include: igt.h
Daniel Vettercd6d5a62014-03-22 19:35:40 +010073 *
Daniel Vetter556c49f2014-03-11 23:27:06 +010074 * This helper library contains simple functions to wrap the raw drm/i915 kernel
75 * ioctls. The normal versions never pass any error codes to the caller and use
76 * igt_assert() to check for error conditions instead. For some ioctls raw
77 * wrappers which do pass on error codes are available. These raw wrappers have
78 * a __ prefix.
79 *
80 * For wrappers which check for feature bits there can also be two versions: The
81 * normal one simply returns a boolean to the caller. But when skipping the
82 * testcase entirely is the right action then it's better to use igt_skip()
83 * directly in the wrapper. Such functions have _require_ in their name to
84 * distinguish them.
85 */
86
/* ioctl entry point used by the wrappers in this file; defaults to drmIoctl.
 * Tests can override this pointer to intercept or fault-inject ioctl calls. */
int (*igt_ioctl)(int fd, unsigned long request, void *arg) = drmIoctl;
88
89
Daniel Vetter556c49f2014-03-11 23:27:06 +010090/**
91 * gem_handle_to_libdrm_bo:
92 * @bufmgr: libdrm buffer manager instance
93 * @fd: open i915 drm file descriptor
94 * @name: buffer name in libdrm
95 * @handle: gem buffer object handle
96 *
97 * This helper function imports a raw gem buffer handle into the libdrm buffer
98 * manager.
99 *
100 * Returns: The imported libdrm buffer manager object.
101 */
102drm_intel_bo *
103gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
104{
105 struct drm_gem_flink flink;
106 int ret;
107 drm_intel_bo *bo;
108
Chris Wilsonacca7242014-07-21 07:57:25 +0100109 memset(&flink, 0, sizeof(handle));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100110 flink.handle = handle;
111 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
112 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100113 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100114
115 bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
116 igt_assert(bo);
117
118 return bo;
119}
120
Chris Wilsone2762682016-10-19 14:07:25 +0100121static int
122__gem_get_tiling(int fd, struct drm_i915_gem_get_tiling *arg)
123{
124 int err;
125
126 err = 0;
127 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, arg))
128 err = -errno;
129 errno = 0;
130
131 return err;
132}
133
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100134/**
135 * gem_get_tiling:
136 * @fd: open i915 drm file descriptor
137 * @handle: gem buffer object handle
138 * @tiling: (out) tiling mode of the gem buffer
139 * @swizzle: (out) bit 6 swizzle mode
140 *
141 * This wraps the GET_TILING ioctl.
Chris Wilsone2762682016-10-19 14:07:25 +0100142 *
143 * Returns whether the actual physical tiling matches the reported tiling.
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100144 */
Chris Wilsone2762682016-10-19 14:07:25 +0100145bool
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100146gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
147{
148 struct drm_i915_gem_get_tiling get_tiling;
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100149
150 memset(&get_tiling, 0, sizeof(get_tiling));
151 get_tiling.handle = handle;
152
Chris Wilsone2762682016-10-19 14:07:25 +0100153 igt_assert_eq(__gem_get_tiling(fd, &get_tiling), 0);
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100154
155 *tiling = get_tiling.tiling_mode;
156 *swizzle = get_tiling.swizzle_mode;
Chris Wilsone2762682016-10-19 14:07:25 +0100157
158 return get_tiling.phys_swizzle_mode == get_tiling.swizzle_mode;
Damien Lespiau7bf0f7f2014-06-20 00:14:52 +0100159}
160
Daniel Vetter766c5bc2014-03-11 22:58:07 +0100161int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
162{
163 struct drm_i915_gem_set_tiling st;
164 int ret;
165
Tomeu Vizoso1af56052016-11-11 14:17:50 +0100166 /* The kernel doesn't know about these tiling modes, expects NONE */
Tomeu Vizoso050c00d2016-11-10 10:28:51 +0100167 if (tiling == I915_TILING_Yf || tiling == I915_TILING_Ys)
Tomeu Vizoso1af56052016-11-11 14:17:50 +0100168 tiling = I915_TILING_NONE;
Tomeu Vizoso050c00d2016-11-10 10:28:51 +0100169
Daniel Vetter766c5bc2014-03-11 22:58:07 +0100170 memset(&st, 0, sizeof(st));
171 do {
172 st.handle = handle;
173 st.tiling_mode = tiling;
174 st.stride = tiling ? stride : 0;
175
176 ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
177 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
178 if (ret != 0)
179 return -errno;
180
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100181 errno = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +0100182 igt_assert(st.tiling_mode == tiling);
183 return 0;
184}
185
Daniel Vetter556c49f2014-03-11 23:27:06 +0100186/**
187 * gem_set_tiling:
188 * @fd: open i915 drm file descriptor
189 * @handle: gem buffer object handle
190 * @tiling: tiling mode bits
191 * @stride: stride of the buffer when using a tiled mode, otherwise must be 0
192 *
193 * This wraps the SET_TILING ioctl.
194 */
void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
{
	/* Use igt_assert_eq() so a failure logs the actual -errno value,
	 * consistent with the other asserting wrappers in this file. */
	igt_assert_eq(__gem_set_tiling(fd, handle, tiling, stride), 0);
}
199
/* Local copy of the SET/GET_CACHING uapi so the wrappers build against older
 * kernel headers. The historical "CACHEING" spelling is kept as-is. */
struct local_drm_i915_gem_caching {
	uint32_t handle;	/* gem buffer object handle */
	uint32_t caching;	/* caching mode bits */
};

#define LOCAL_DRM_I915_GEM_SET_CACHEING 0x2f
#define LOCAL_DRM_I915_GEM_GET_CACHEING 0x30
#define LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING \
	DRM_IOW(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_SET_CACHEING, struct local_drm_i915_gem_caching)
#define LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_GET_CACHEING, struct local_drm_i915_gem_caching)
211
Chris Wilson95090bb2016-03-18 11:49:23 +0000212static int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
213{
214 struct local_drm_i915_gem_caching arg;
215 int err;
216
217 memset(&arg, 0, sizeof(arg));
218 arg.handle = handle;
219 arg.caching = caching;
220
221 err = 0;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000222 if (igt_ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING, &arg)) {
Chris Wilson95090bb2016-03-18 11:49:23 +0000223 err = -errno;
224 igt_assert(errno == ENOTTY || errno == EINVAL);
225 }
226 return err;
227}
228
Daniel Vetter556c49f2014-03-11 23:27:06 +0100229/**
230 * gem_set_caching:
231 * @fd: open i915 drm file descriptor
232 * @handle: gem buffer object handle
233 * @caching: caching mode bits
234 *
235 * This wraps the SET_CACHING ioctl. Note that this function internally calls
236 * igt_require() when SET_CACHING isn't available, hence automatically skips the
237 * test. Therefore always extract test logic which uses this into its own
238 * subtest.
239 */
void gem_set_caching(int fd, uint32_t handle, uint32_t caching)
{
	int err = __gem_set_caching(fd, handle, caching);

	/* Not every kernel supports SET_CACHING; skip rather than fail. */
	igt_require(err == 0);
	errno = 0;
}
245
246/**
247 * gem_get_caching:
248 * @fd: open i915 drm file descriptor
249 * @handle: gem buffer object handle
250 *
251 * This wraps the GET_CACHING ioctl.
252 *
253 * Returns: The current caching mode bits.
254 */
255uint32_t gem_get_caching(int fd, uint32_t handle)
256{
257 struct local_drm_i915_gem_caching arg;
258 int ret;
259
Chris Wilson95090bb2016-03-18 11:49:23 +0000260 memset(&arg, 0, sizeof(arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100261 arg.handle = handle;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100262 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING, &arg);
263 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100264 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100265
266 return arg.caching;
267}
268
269/**
270 * gem_open:
271 * @fd: open i915 drm file descriptor
272 * @name: flink buffer name
273 *
274 * This wraps the GEM_OPEN ioctl, which is used to import an flink name.
275 *
276 * Returns: gem file-private buffer handle of the open object.
277 */
278uint32_t gem_open(int fd, uint32_t name)
279{
280 struct drm_gem_open open_struct;
281 int ret;
282
Chris Wilsonacca7242014-07-21 07:57:25 +0100283 memset(&open_struct, 0, sizeof(open_struct));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100284 open_struct.name = name;
285 ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_struct);
286 igt_assert(ret == 0);
287 igt_assert(open_struct.handle != 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100288 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100289
290 return open_struct.handle;
291}
292
293/**
294 * gem_flink:
295 * @fd: open i915 drm file descriptor
296 * @handle: file-private gem buffer object handle
297 *
298 * This wraps the GEM_FLINK ioctl, which is used to export a gem buffer object
299 * into the device-global flink namespace. See gem_open() for opening such a
300 * buffer name on a different i915 drm file descriptor.
301 *
302 * Returns: The created flink buffer name.
303 */
304uint32_t gem_flink(int fd, uint32_t handle)
305{
306 struct drm_gem_flink flink;
307 int ret;
308
Chris Wilsonacca7242014-07-21 07:57:25 +0100309 memset(&flink, 0, sizeof(flink));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100310 flink.handle = handle;
311 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
312 igt_assert(ret == 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100313 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100314
315 return flink.name;
316}
317
318/**
319 * gem_close:
320 * @fd: open i915 drm file descriptor
321 * @handle: gem buffer object handle
322 *
 * This wraps the GEM_CLOSE ioctl, which is used to release a file-private gem buffer
324 * handle.
325 */
326void gem_close(int fd, uint32_t handle)
327{
328 struct drm_gem_close close_bo;
329
Chris Wilson7b5a8182015-12-12 18:56:53 +0000330 igt_assert_neq(handle, 0);
331
Chris Wilsonacca7242014-07-21 07:57:25 +0100332 memset(&close_bo, 0, sizeof(close_bo));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100333 close_bo.handle = handle;
334 do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
335}
336
Chris Wilson8fcca192017-03-08 12:22:58 +0000337int __gem_write(int fd, uint32_t handle, uint64_t offset, const void *buf, uint64_t length)
Chris Wilson4a3fa352016-04-11 07:40:11 +0100338{
339 struct drm_i915_gem_pwrite gem_pwrite;
340 int err;
341
342 memset(&gem_pwrite, 0, sizeof(gem_pwrite));
343 gem_pwrite.handle = handle;
344 gem_pwrite.offset = offset;
345 gem_pwrite.size = length;
Chris Wilson39858a12017-01-02 11:05:21 +0000346 gem_pwrite.data_ptr = to_user_pointer(buf);
Chris Wilson4a3fa352016-04-11 07:40:11 +0100347
348 err = 0;
349 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite))
350 err = -errno;
351 return err;
352}
353
Daniel Vetter556c49f2014-03-11 23:27:06 +0100354/**
355 * gem_write:
356 * @fd: open i915 drm file descriptor
357 * @handle: gem buffer object handle
358 * @offset: offset within the buffer of the subrange
359 * @buf: pointer to the data to write into the buffer
360 * @length: size of the subrange
361 *
 * This wraps the PWRITE ioctl, which is used to upload linear data to a subrange
363 * of a gem buffer object.
364 */
void gem_write(int fd, uint32_t handle, uint64_t offset, const void *buf, uint64_t length)
{
	int err = __gem_write(fd, handle, offset, buf, length);

	igt_assert_eq(err, 0);
}
369
Chris Wilson4a3fa352016-04-11 07:40:11 +0100370static int __gem_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint64_t length)
371{
372 struct drm_i915_gem_pread gem_pread;
373 int err;
374
375 memset(&gem_pread, 0, sizeof(gem_pread));
376 gem_pread.handle = handle;
377 gem_pread.offset = offset;
378 gem_pread.size = length;
Chris Wilson39858a12017-01-02 11:05:21 +0000379 gem_pread.data_ptr = to_user_pointer(buf);
Chris Wilson4a3fa352016-04-11 07:40:11 +0100380
381 err = 0;
382 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread))
383 err = -errno;
384 return err;
385}
Daniel Vetter556c49f2014-03-11 23:27:06 +0100386/**
387 * gem_read:
388 * @fd: open i915 drm file descriptor
389 * @handle: gem buffer object handle
390 * @offset: offset within the buffer of the subrange
391 * @buf: pointer to the data to read into
392 * @length: size of the subrange
393 *
 * This wraps the PREAD ioctl, which is used to download linear data from a subrange
395 * of a gem buffer object.
396 */
void gem_read(int fd, uint32_t handle, uint64_t offset, void *buf, uint64_t length)
{
	int err = __gem_read(fd, handle, offset, buf, length);

	igt_assert_eq(err, 0);
}
401
Michał Winiarski7b46ae12015-06-30 17:01:10 +0200402int __gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
403{
404 struct drm_i915_gem_set_domain set_domain;
405 int err;
406
407 memset(&set_domain, 0, sizeof(set_domain));
408 set_domain.handle = handle;
409 set_domain.read_domains = read;
410 set_domain.write_domain = write;
411
412 err = 0;
413 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
414 err = -errno;
415
416 return err;
417}
418
Daniel Vetter556c49f2014-03-11 23:27:06 +0100419/**
420 * gem_set_domain:
421 * @fd: open i915 drm file descriptor
422 * @handle: gem buffer object handle
Michał Winiarski7b46ae12015-06-30 17:01:10 +0200423 * @read: gem domain bits for read access
424 * @write: gem domain bit for write access
Daniel Vetter556c49f2014-03-11 23:27:06 +0100425 *
426 * This wraps the SET_DOMAIN ioctl, which is used to control the coherency of
427 * the gem buffer object between the cpu and gtt mappings. It is also use to
428 * synchronize with outstanding rendering in general, but for that use-case
429 * please have a look at gem_sync().
430 */
void gem_set_domain(int fd, uint32_t handle, uint32_t read, uint32_t write)
{
	int err = __gem_set_domain(fd, handle, read, write);

	igt_assert_eq(err, 0);
}
435
436/**
 * gem_wait:
438 * @fd: open i915 drm file descriptor
439 * @handle: gem buffer object handle
440 * @timeout_ns: [in] time to wait, [out] remaining time (in nanoseconds)
441 *
 * This function waits for outstanding rendering to complete, up to
443 * the timeout_ns. If no timeout_ns is provided, the wait is indefinite and
444 * only returns upon an error or when the rendering is complete.
445 */
446int gem_wait(int fd, uint32_t handle, int64_t *timeout_ns)
447{
448 struct drm_i915_gem_wait wait;
449 int ret;
450
451 memset(&wait, 0, sizeof(wait));
452 wait.bo_handle = handle;
453 wait.timeout_ns = timeout_ns ? *timeout_ns : -1;
454 wait.flags = 0;
455
456 ret = 0;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000457 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
Chris Wilsonf27d2952016-02-23 17:45:49 +0000458 ret = -errno;
459
460 if (timeout_ns)
461 *timeout_ns = wait.timeout_ns;
462
463 return ret;
464}
465
466/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100467 * gem_sync:
468 * @fd: open i915 drm file descriptor
469 * @handle: gem buffer object handle
470 *
 * This function waits for outstanding rendering to complete.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100472 */
473void gem_sync(int fd, uint32_t handle)
474{
Chris Wilsonf27d2952016-02-23 17:45:49 +0000475 if (gem_wait(fd, handle, NULL))
476 gem_set_domain(fd, handle,
477 I915_GEM_DOMAIN_GTT,
478 I915_GEM_DOMAIN_GTT);
479 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100480}
481
Chris Wilsonf27d2952016-02-23 17:45:49 +0000482
Ankitprasad Sharma70c3be82015-12-02 14:54:50 +0530483bool gem_create__has_stolen_support(int fd)
484{
485 static int has_stolen_support = -1;
486 struct drm_i915_getparam gp;
487 int val = -1;
488
489 if (has_stolen_support < 0) {
490 memset(&gp, 0, sizeof(gp));
Ankitprasad Sharmabeef31a2016-06-06 14:52:42 +0530491 gp.param = 38; /* CREATE_VERSION */
Ankitprasad Sharma70c3be82015-12-02 14:54:50 +0530492 gp.value = &val;
493
494 /* Do we have the extended gem_create_ioctl? */
495 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
496 has_stolen_support = val >= 2;
497 }
498
499 return has_stolen_support;
500}
501
/* Local copy of the extended (v2) GEM_CREATE uapi, which adds a flags field
 * allowing placement of the object in stolen memory. */
struct local_i915_gem_create_v2 {
	uint64_t size;		/* requested object size in bytes */
	uint32_t handle;	/* out: new gem buffer object handle */
	uint32_t pad;
#define I915_CREATE_PLACEMENT_STOLEN (1<<0)
	uint32_t flags;		/* e.g. I915_CREATE_PLACEMENT_STOLEN */
};

#define LOCAL_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct local_i915_gem_create_v2)
511uint32_t __gem_create_stolen(int fd, uint64_t size)
512{
513 struct local_i915_gem_create_v2 create;
514 int ret;
515
516 memset(&create, 0, sizeof(create));
517 create.handle = 0;
518 create.size = size;
519 create.flags = I915_CREATE_PLACEMENT_STOLEN;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000520 ret = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
Ankitprasad Sharma70c3be82015-12-02 14:54:50 +0530521
522 if (ret < 0)
523 return 0;
524
525 errno = 0;
526 return create.handle;
527}
528
529/**
530 * gem_create_stolen:
531 * @fd: open i915 drm file descriptor
532 * @size: desired size of the buffer
533 *
534 * This wraps the new GEM_CREATE ioctl, which allocates a new gem buffer
535 * object of @size and placement in stolen memory region.
536 *
537 * Returns: The file-private handle of the created buffer object
538 */
539
540uint32_t gem_create_stolen(int fd, uint64_t size)
541{
542 struct local_i915_gem_create_v2 create;
543
544 memset(&create, 0, sizeof(create));
545 create.handle = 0;
546 create.size = size;
547 create.flags = I915_CREATE_PLACEMENT_STOLEN;
548 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_CREATE, &create);
549 igt_assert(create.handle);
550
551 return create.handle;
552}
553
Chris Wilson7fd0cae2017-10-03 14:52:27 +0100554int __gem_create(int fd, uint64_t size, uint32_t *handle)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100555{
Chris Wilson00450852017-10-03 12:46:10 +0100556 struct drm_i915_gem_create create = {
557 .size = size,
558 };
559 int err = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100560
Chris Wilson00450852017-10-03 12:46:10 +0100561 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
562 *handle = create.handle;
563 else
564 err = -errno;
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100565
566 errno = 0;
Chris Wilson00450852017-10-03 12:46:10 +0100567 return err;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100568}
569
570/**
571 * gem_create:
572 * @fd: open i915 drm file descriptor
573 * @size: desired size of the buffer
574 *
575 * This wraps the GEM_CREATE ioctl, which allocates a new gem buffer object of
576 * @size.
577 *
578 * Returns: The file-private handle of the created buffer object
579 */
uint32_t gem_create(int fd, uint64_t size)
{
	uint32_t handle;

	igt_assert_eq(__gem_create(fd, size, &handle), 0);

	return handle;
}
588
589/**
Chris Wilsone3b68bb2016-01-23 09:44:19 +0000590 * __gem_execbuf:
591 * @fd: open i915 drm file descriptor
592 * @execbuf: execbuffer data structure
593 *
594 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
595 * run. This is allowed to fail, with -errno returned.
596 */
597int __gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
598{
599 int err = 0;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000600 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
Chris Wilsone3b68bb2016-01-23 09:44:19 +0000601 err = -errno;
Chris Wilson9ba9af22016-03-04 20:38:16 +0000602 errno = 0;
Chris Wilsone3b68bb2016-01-23 09:44:19 +0000603 return err;
604}
605
606/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100607 * gem_execbuf:
608 * @fd: open i915 drm file descriptor
609 * @execbuf: execbuffer data structure
610 *
611 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
612 * run.
613 */
void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int err = __gem_execbuf(fd, execbuf);

	igt_assert_eq(err, 0);
}
618
Lukasz Fiedorowicz9fc445d2017-05-25 09:40:54 +0200619#define LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR \
620 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
621
622/**
623 * __gem_execbuf_wr:
624 * @fd: open i915 drm file descriptor
625 * @execbuf: execbuffer data structure
626 *
627 * This wraps the EXECBUFFER2_WR ioctl, which submits a batchbuffer for the gpu to
628 * run. This is allowed to fail, with -errno returned.
629 */
630int __gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
631{
632 int err = 0;
633 if (igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf))
634 err = -errno;
635 errno = 0;
636 return err;
637}
638
639/**
640 * gem_execbuf_wr:
641 * @fd: open i915 drm file descriptor
642 * @execbuf: execbuffer data structure
643 *
644 * This wraps the EXECBUFFER2_WR ioctl, which submits a batchbuffer for the gpu to
645 * run.
646 */
void gem_execbuf_wr(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int err = __gem_execbuf_wr(fd, execbuf);

	igt_assert_eq(err, 0);
}
651
Daniel Vetter556c49f2014-03-11 23:27:06 +0100652/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300653 * __gem_mmap__gtt:
Daniel Vetter556c49f2014-03-11 23:27:06 +0100654 * @fd: open i915 drm file descriptor
655 * @handle: gem buffer object handle
656 * @size: size of the gem buffer
657 * @prot: memory protection bits as used by mmap()
658 *
659 * This functions wraps up procedure to establish a memory mapping through the
660 * GTT.
661 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300662 * Returns: A pointer to the created memory mapping, NULL on failure.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100663 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300664void *__gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100665{
666 struct drm_i915_gem_mmap_gtt mmap_arg;
667 void *ptr;
668
Chris Wilsonacca7242014-07-21 07:57:25 +0100669 memset(&mmap_arg, 0, sizeof(mmap_arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100670 mmap_arg.handle = handle;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000671 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
Daniel Vetter556c49f2014-03-11 23:27:06 +0100672 return NULL;
673
674 ptr = mmap64(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
675 if (ptr == MAP_FAILED)
676 ptr = NULL;
Chris Wilsona464fb72015-01-02 16:33:29 +0530677 else
678 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100679
Maarten Lankhorstd930b642017-02-09 10:42:01 +0100680 VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size));
681
Daniel Vetter556c49f2014-03-11 23:27:06 +0100682 return ptr;
683}
684
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300685/**
686 * gem_mmap__gtt:
687 * @fd: open i915 drm file descriptor
688 * @handle: gem buffer object handle
689 * @size: size of the gem buffer
690 * @prot: memory protection bits as used by mmap()
691 *
692 * Like __gem_mmap__gtt() except we assert on failure.
693 *
694 * Returns: A pointer to the created memory mapping
695 */
696void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
697{
698 void *ptr = __gem_mmap__gtt(fd, handle, size, prot);
699 igt_assert(ptr);
700 return ptr;
701}
702
int gem_munmap(void *ptr, uint64_t size)
{
	int ret = munmap(ptr, size);

	/* Mark the range inaccessible for valgrind, but only on success. */
	if (!ret)
		VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size));

	return ret;
}
712
/* Local copy of the v2 GEM_MMAP uapi, which adds a flags field so a
 * write-combining cpu mapping can be requested. */
struct local_i915_gem_mmap_v2 {
	uint32_t handle;	/* gem buffer object handle */
	uint32_t pad;
	uint64_t offset;	/* offset into the object to start the mapping at */
	uint64_t size;		/* length of the mapping */
	uint64_t addr_ptr;	/* out: userspace address of the mapping */
	uint64_t flags;
#define I915_MMAP_WC 0x1
};
#define LOCAL_IOCTL_I915_GEM_MMAP_v2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct local_i915_gem_mmap_v2)
723
724bool gem_mmap__has_wc(int fd)
725{
726 static int has_wc = -1;
727
728 if (has_wc == -1) {
729 struct drm_i915_getparam gp;
Chris Wilson3bdafec2017-04-13 08:18:10 +0100730 int mmap_version = -1;
731 int gtt_version = -1;
Chris Wilsona464fb72015-01-02 16:33:29 +0530732
733 has_wc = 0;
734
735 memset(&gp, 0, sizeof(gp));
Chris Wilson3bdafec2017-04-13 08:18:10 +0100736 gp.param = 40; /* MMAP_GTT_VERSION */
737 gp.value = &gtt_version;
Chris Wilsona464fb72015-01-02 16:33:29 +0530738 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
Chris Wilson3bdafec2017-04-13 08:18:10 +0100739
740 memset(&gp, 0, sizeof(gp));
741 gp.param = 30; /* MMAP_VERSION */
742 gp.value = &mmap_version;
743 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
744
745 /* Do we have the new mmap_ioctl with DOMAIN_WC? */
746 if (mmap_version >= 1 && gtt_version >= 2) {
Chris Wilsona464fb72015-01-02 16:33:29 +0530747 struct local_i915_gem_mmap_v2 arg;
748
749 /* Does this device support wc-mmaps ? */
750 memset(&arg, 0, sizeof(arg));
751 arg.handle = gem_create(fd, 4096);
752 arg.offset = 0;
753 arg.size = 4096;
754 arg.flags = I915_MMAP_WC;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000755 has_wc = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &arg) == 0;
Chris Wilsona464fb72015-01-02 16:33:29 +0530756 gem_close(fd, arg.handle);
757 }
758 errno = 0;
759 }
760
761 return has_wc > 0;
762}
763
764/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300765 * __gem_mmap__wc:
Chris Wilsona464fb72015-01-02 16:33:29 +0530766 * @fd: open i915 drm file descriptor
767 * @handle: gem buffer object handle
Thomas Woodd01ebbd2015-06-29 16:47:14 +0100768 * @offset: offset in the gem buffer of the mmap arena
Chris Wilsona464fb72015-01-02 16:33:29 +0530769 * @size: size of the mmap arena
770 * @prot: memory protection bits as used by mmap()
771 *
772 * This functions wraps up procedure to establish a memory mapping through
Chris Wilsoneaa1e8e2015-01-06 10:06:41 +0000773 * direct cpu access, bypassing the gpu and cpu caches completely and also
774 * bypassing the GTT system agent (i.e. there is no automatic tiling of
775 * the mmapping through the fence registers).
Chris Wilsona464fb72015-01-02 16:33:29 +0530776 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300777 * Returns: A pointer to the created memory mapping, NULL on failure.
Chris Wilsona464fb72015-01-02 16:33:29 +0530778 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300779void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
Chris Wilsona464fb72015-01-02 16:33:29 +0530780{
781 struct local_i915_gem_mmap_v2 arg;
782
783 if (!gem_mmap__has_wc(fd)) {
784 errno = ENOSYS;
785 return NULL;
786 }
787
788 memset(&arg, 0, sizeof(arg));
789 arg.handle = handle;
790 arg.offset = offset;
791 arg.size = size;
792 arg.flags = I915_MMAP_WC;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000793 if (igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &arg))
Chris Wilsona464fb72015-01-02 16:33:29 +0530794 return NULL;
795
Maarten Lankhorstd930b642017-02-09 10:42:01 +0100796 VG(VALGRIND_MAKE_MEM_DEFINED(from_user_pointer(arg.addr_ptr), arg.size));
797
Chris Wilsona464fb72015-01-02 16:33:29 +0530798 errno = 0;
Chris Wilson8f393102017-01-02 11:23:33 +0000799 return from_user_pointer(arg.addr_ptr);
Chris Wilsona464fb72015-01-02 16:33:29 +0530800}
801
Daniel Vetter556c49f2014-03-11 23:27:06 +0100802/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300803 * gem_mmap__wc:
804 * @fd: open i915 drm file descriptor
805 * @handle: gem buffer object handle
806 * @offset: offset in the gem buffer of the mmap arena
807 * @size: size of the mmap arena
808 * @prot: memory protection bits as used by mmap()
809 *
810 * Like __gem_mmap__wc() except we assert on failure.
811 *
812 * Returns: A pointer to the created memory mapping
813 */
814void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
815{
816 void *ptr = __gem_mmap__wc(fd, handle, offset, size, prot);
817 igt_assert(ptr);
818 return ptr;
819}
820
821/**
822 * __gem_mmap__cpu:
Daniel Vetter556c49f2014-03-11 23:27:06 +0100823 * @fd: open i915 drm file descriptor
824 * @handle: gem buffer object handle
Thomas Woodd01ebbd2015-06-29 16:47:14 +0100825 * @offset: offset in the gem buffer of the mmap arena
Chris Wilson6fff1f82014-11-04 12:06:17 +0000826 * @size: size of the mmap arena
Daniel Vetter556c49f2014-03-11 23:27:06 +0100827 * @prot: memory protection bits as used by mmap()
828 *
829 * This functions wraps up procedure to establish a memory mapping through
830 * direct cpu access, bypassing the gpu completely.
831 *
Ville Syrjälä8986bbc2015-10-09 16:59:13 +0300832 * Returns: A pointer to the created memory mapping, NULL on failure.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100833 */
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300834void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
Daniel Vetter556c49f2014-03-11 23:27:06 +0100835{
836 struct drm_i915_gem_mmap mmap_arg;
837
Chris Wilsonacca7242014-07-21 07:57:25 +0100838 memset(&mmap_arg, 0, sizeof(mmap_arg));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100839 mmap_arg.handle = handle;
Chris Wilson6fff1f82014-11-04 12:06:17 +0000840 mmap_arg.offset = offset;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100841 mmap_arg.size = size;
Chris Wilsonc1fed522016-03-19 13:00:29 +0000842 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
Daniel Vetter556c49f2014-03-11 23:27:06 +0100843 return NULL;
844
Maarten Lankhorstd930b642017-02-09 10:42:01 +0100845 VG(VALGRIND_MAKE_MEM_DEFINED(from_user_pointer(mmap_arg.addr_ptr), mmap_arg.size));
846
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100847 errno = 0;
Chris Wilson8f393102017-01-02 11:23:33 +0000848 return from_user_pointer(mmap_arg.addr_ptr);
Daniel Vetter556c49f2014-03-11 23:27:06 +0100849}
850
851/**
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300852 * gem_mmap__cpu:
853 * @fd: open i915 drm file descriptor
854 * @handle: gem buffer object handle
855 * @offset: offset in the gem buffer of the mmap arena
856 * @size: size of the mmap arena
857 * @prot: memory protection bits as used by mmap()
858 *
859 * Like __gem_mmap__cpu() except we assert on failure.
860 *
861 * Returns: A pointer to the created memory mapping
862 */
863void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
864{
865 void *ptr = __gem_mmap__cpu(fd, handle, offset, size, prot);
866 igt_assert(ptr);
867 return ptr;
868}
869
870/**
Daniel Vetter556c49f2014-03-11 23:27:06 +0100871 * gem_madvise:
872 * @fd: open i915 drm file descriptor
873 * @handle: gem buffer object handle
874 * @state: desired madvise state
875 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300876 * This wraps the MADVISE ioctl, which is used in libdrm to implement
Daniel Vetter556c49f2014-03-11 23:27:06 +0100877 * opportunistic buffer object caching. Objects in the cache are set to DONTNEED
878 * (internally in the kernel tracked as purgeable objects). When such a cached
879 * object is in need again it must be set back to WILLNEED before first use.
880 *
881 * Returns: When setting the madvise state to WILLNEED this returns whether the
Thomas Wood519f3772014-09-26 14:24:52 +0100882 * backing storage was still available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100883 */
884int gem_madvise(int fd, uint32_t handle, int state)
885{
886 struct drm_i915_gem_madvise madv;
887
Chris Wilsonacca7242014-07-21 07:57:25 +0100888 memset(&madv, 0, sizeof(madv));
Daniel Vetter556c49f2014-03-11 23:27:06 +0100889 madv.handle = handle;
890 madv.madv = state;
891 madv.retained = 1;
892 do_ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
893
894 return madv.retained;
895}
896
897/**
898 * gem_context_create:
899 * @fd: open i915 drm file descriptor
900 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300901 * This wraps the CONTEXT_CREATE ioctl, which is used to allocate a new
902 * context. Note that similarly to gem_set_caching() this wrapper skips on
903 * kernels and platforms where context support is not available.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100904 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300905 * Returns: The id of the allocated context.
Daniel Vetter556c49f2014-03-11 23:27:06 +0100906 */
907uint32_t gem_context_create(int fd)
908{
909 struct drm_i915_gem_context_create create;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100910
Chris Wilsonacca7242014-07-21 07:57:25 +0100911 memset(&create, 0, sizeof(create));
Chris Wilsonc1fed522016-03-19 13:00:29 +0000912 if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create)) {
Chris Wilsonffbc59a2016-02-05 11:16:18 +0000913 int err = -errno;
914 igt_skip_on(err == -ENODEV || errno == -EINVAL);
915 igt_assert_eq(err, 0);
916 }
917 igt_assert(create.ctx_id != 0);
Chris Wilsonb918a3b2014-04-25 07:40:34 +0100918 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +0100919
920 return create.ctx_id;
921}
922
Daniel Vetter09b82112015-02-06 17:15:13 +0100923int __gem_context_destroy(int fd, uint32_t ctx_id)
924{
925 struct drm_i915_gem_context_destroy destroy;
926 int ret;
927
928 memset(&destroy, 0, sizeof(destroy));
929 destroy.ctx_id = ctx_id;
930
Chris Wilsonc1fed522016-03-19 13:00:29 +0000931 ret = igt_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
Daniel Vetter09b82112015-02-06 17:15:13 +0100932 if (ret)
933 return -errno;
934 return 0;
935}
936
937/**
Thomas Wooda22548f2015-02-16 11:17:11 +0000938 * gem_context_destroy:
Daniel Vetter09b82112015-02-06 17:15:13 +0100939 * @fd: open i915 drm file descriptor
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300940 * @ctx_id: i915 context id
Daniel Vetter09b82112015-02-06 17:15:13 +0100941 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300942 * This wraps the CONTEXT_DESTROY ioctl, which is used to free a context.
Daniel Vetter09b82112015-02-06 17:15:13 +0100943 */
944void gem_context_destroy(int fd, uint32_t ctx_id)
945{
946 struct drm_i915_gem_context_destroy destroy;
947
948 memset(&destroy, 0, sizeof(destroy));
949 destroy.ctx_id = ctx_id;
950
951 do_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
952}
953
Chris Wilson7b349f32016-10-13 22:44:08 +0100954int __gem_context_get_param(int fd, struct local_i915_gem_context_param *p)
955{
956#define LOCAL_I915_GEM_CONTEXT_GETPARAM 0x34
957#define LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_CONTEXT_GETPARAM, struct local_i915_gem_context_param)
958 if (igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, p))
959 return -errno;
960
961 errno = 0;
962 return 0;
963}
964
Daniel Vetter556c49f2014-03-11 23:27:06 +0100965/**
Daniel Vetter75c075c2015-02-06 17:13:59 +0100966 * gem_context_get_param:
967 * @fd: open i915 drm file descriptor
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300968 * @p: i915 context parameter
Daniel Vetter75c075c2015-02-06 17:13:59 +0100969 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300970 * This wraps the CONTEXT_GET_PARAM ioctl, which is used to get a context
Radoslaw Szwichtenbergcec2d4a2017-06-23 16:01:05 +0200971 * parameter.
Daniel Vetter75c075c2015-02-06 17:13:59 +0100972 */
973void gem_context_get_param(int fd, struct local_i915_gem_context_param *p)
974{
Chris Wilson7b349f32016-10-13 22:44:08 +0100975 igt_assert(__gem_context_get_param(fd, p) == 0);
Daniel Vetter75c075c2015-02-06 17:13:59 +0100976}
977
Chris Wilson7b349f32016-10-13 22:44:08 +0100978
Chris Wilson19642c62015-12-11 13:27:49 +0000979int __gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
980{
981#define LOCAL_I915_GEM_CONTEXT_SETPARAM 0x35
982#define LOCAL_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_CONTEXT_SETPARAM, struct local_i915_gem_context_param)
Chris Wilsonc1fed522016-03-19 13:00:29 +0000983 if (igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_SETPARAM, p))
Chris Wilson19642c62015-12-11 13:27:49 +0000984 return -errno;
985
986 errno = 0;
987 return 0;
988}
Daniel Vetter75c075c2015-02-06 17:13:59 +0100989/**
990 * gem_context_set_param:
991 * @fd: open i915 drm file descriptor
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300992 * @p: i915 context parameter
Daniel Vetter75c075c2015-02-06 17:13:59 +0100993 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +0300994 * This wraps the CONTEXT_SET_PARAM ioctl, which is used to set a context
Radoslaw Szwichtenbergcec2d4a2017-06-23 16:01:05 +0200995 * parameter.
Daniel Vetter75c075c2015-02-06 17:13:59 +0100996 */
997void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
998{
Chris Wilson19642c62015-12-11 13:27:49 +0000999 igt_assert(__gem_context_set_param(fd, p) == 0);
Daniel Vetter75c075c2015-02-06 17:13:59 +01001000}
1001
1002/**
Thomas Wood26f40812015-02-20 11:31:01 +00001003 * gem_context_require_param:
Daniel Vetter75c075c2015-02-06 17:13:59 +01001004 * @fd: open i915 drm file descriptor
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +03001005 * @param: i915 context parameter
Daniel Vetter75c075c2015-02-06 17:13:59 +01001006 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +03001007 * Feature test macro to query whether context parameter support for @param
Daniel Vetter75c075c2015-02-06 17:13:59 +01001008 * is available. Automatically skips through igt_require() if not.
1009 */
1010void gem_context_require_param(int fd, uint64_t param)
1011{
1012 struct local_i915_gem_context_param p;
1013
1014 p.context = 0;
1015 p.param = param;
1016 p.value = 0;
1017 p.size = 0;
1018
Chris Wilsonc1fed522016-03-19 13:00:29 +00001019 igt_require(igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
Daniel Vetter75c075c2015-02-06 17:13:59 +01001020}
1021
Mika Kuoppala773ac7c2016-11-08 12:31:06 +02001022void gem_context_require_bannable(int fd)
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001023{
1024 static int has_ban_period = -1;
Mika Kuoppala773ac7c2016-11-08 12:31:06 +02001025 static int has_bannable = -1;
1026
1027 if (has_bannable < 0) {
1028 struct local_i915_gem_context_param p;
1029
1030 p.context = 0;
1031 p.param = LOCAL_CONTEXT_PARAM_BANNABLE;
1032 p.value = 0;
1033 p.size = 0;
1034
1035 has_bannable = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0;
1036 }
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001037
1038 if (has_ban_period < 0) {
1039 struct local_i915_gem_context_param p;
1040
1041 p.context = 0;
1042 p.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
1043 p.value = 0;
1044 p.size = 0;
1045
Chris Wilsonc1fed522016-03-19 13:00:29 +00001046 has_ban_period = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0;
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001047 }
1048
Mika Kuoppala773ac7c2016-11-08 12:31:06 +02001049 igt_require(has_ban_period || has_bannable);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001050}
1051
Tiago Vignattie1f663b2015-08-12 15:57:12 -03001052int __gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t flags, uint32_t *handle)
1053{
1054 struct local_i915_gem_userptr userptr;
Tiago Vignattie1f663b2015-08-12 15:57:12 -03001055
1056 memset(&userptr, 0, sizeof(userptr));
Chris Wilson39858a12017-01-02 11:05:21 +00001057 userptr.user_ptr = to_user_pointer(ptr);
Tiago Vignattie1f663b2015-08-12 15:57:12 -03001058 userptr.user_size = size;
1059 userptr.flags = flags;
1060 if (read_only)
1061 userptr.flags |= LOCAL_I915_USERPTR_READ_ONLY;
1062
Chris Wilson91d37802016-09-30 17:41:01 +01001063 if (igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr))
1064 return -errno;
Tiago Vignattie1f663b2015-08-12 15:57:12 -03001065
Chris Wilson91d37802016-09-30 17:41:01 +01001066 *handle = userptr.handle;
1067 return 0;
Tiago Vignattie1f663b2015-08-12 15:57:12 -03001068}
1069
1070/**
1071 * gem_userptr:
1072 * @fd: open i915 drm file descriptor
1073 * @ptr: userptr pointer to be passed
1074 * @size: desired size of the buffer
1075 * @read_only: specify whether userptr is opened read only
1076 * @flags: other userptr flags
1077 * @handle: returned handle for the object
1078 *
1079 * Returns userptr handle for the GEM object.
1080 */
1081void gem_userptr(int fd, void *ptr, int size, int read_only, uint32_t flags, uint32_t *handle)
1082{
1083 igt_assert_eq(__gem_userptr(fd, ptr, size, read_only, flags, handle), 0);
1084}
1085
Daniel Vetter75c075c2015-02-06 17:13:59 +01001086/**
Daniel Vetter556c49f2014-03-11 23:27:06 +01001087 * gem_sw_finish:
1088 * @fd: open i915 drm file descriptor
1089 * @handle: gem buffer object handle
1090 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +03001091 * This wraps the SW_FINISH ioctl, which is used to flush out frontbuffer
Daniel Vetter556c49f2014-03-11 23:27:06 +01001092 * rendering done through the direct cpu memory mappings. Shipping userspace
1093 * does _not_ call this after frontbuffer rendering through gtt memory mappings.
1094 */
1095void gem_sw_finish(int fd, uint32_t handle)
1096{
1097 struct drm_i915_gem_sw_finish finish;
1098
Chris Wilsonacca7242014-07-21 07:57:25 +01001099 memset(&finish, 0, sizeof(finish));
Daniel Vetter556c49f2014-03-11 23:27:06 +01001100 finish.handle = handle;
1101
1102 do_ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
1103}
1104
1105/**
1106 * gem_bo_busy:
1107 * @fd: open i915 drm file descriptor
1108 * @handle: gem buffer object handle
1109 *
Arkadiusz Hiler2fd6ada2017-06-28 13:27:59 +03001110 * This wraps the BUSY ioctl, which tells whether a buffer object is still
Daniel Vetter556c49f2014-03-11 23:27:06 +01001111 * actively used by the gpu in a execbuffer.
1112 *
1113 * Returns: The busy state of the buffer object.
1114 */
1115bool gem_bo_busy(int fd, uint32_t handle)
1116{
1117 struct drm_i915_gem_busy busy;
1118
Chris Wilsonacca7242014-07-21 07:57:25 +01001119 memset(&busy, 0, sizeof(busy));
Daniel Vetter556c49f2014-03-11 23:27:06 +01001120 busy.handle = handle;
1121
1122 do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
1123
1124 return !!busy.busy;
1125}
1126
1127
1128/* feature test helpers */
1129
1130/**
Michał Winiarski52b5d502016-01-25 19:35:01 +01001131 * gem_gtt_type:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001132 * @fd: open i915 drm file descriptor
1133 *
Michał Winiarski52b5d502016-01-25 19:35:01 +01001134 * Feature test macro to check what type of gtt is being used by the kernel:
1135 * 0 - global gtt
1136 * 1 - aliasing ppgtt
1137 * 2 - full ppgtt, limited to 32bit address space
1138 * 3 - full ppgtt, 64bit address space
Daniel Vetter556c49f2014-03-11 23:27:06 +01001139 *
Michał Winiarski52b5d502016-01-25 19:35:01 +01001140 * Returns: Type of gtt being used.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001141 */
Michał Winiarski52b5d502016-01-25 19:35:01 +01001142int gem_gtt_type(int fd)
Daniel Vetter556c49f2014-03-11 23:27:06 +01001143{
1144 struct drm_i915_getparam gp;
Chris Wilsonacca7242014-07-21 07:57:25 +01001145 int val = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001146
Chris Wilsonacca7242014-07-21 07:57:25 +01001147 memset(&gp, 0, sizeof(gp));
Daniel Vetter556c49f2014-03-11 23:27:06 +01001148 gp.param = 18; /* HAS_ALIASING_PPGTT */
1149 gp.value = &val;
1150
1151 if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
1152 return 0;
1153
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001154 errno = 0;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001155 return val;
1156}
1157
1158/**
Michał Winiarski52b5d502016-01-25 19:35:01 +01001159 * gem_uses_ppgtt:
1160 * @fd: open i915 drm file descriptor
1161 *
1162 * Feature test macro to check whether the kernel internally uses ppgtt to
1163 * execute batches. Note that this is also true when we're using full ppgtt.
1164 *
1165 * Returns: Whether batches are run through ppgtt.
1166 */
1167bool gem_uses_ppgtt(int fd)
1168{
1169 return gem_gtt_type(fd) > 0;
1170}
1171
1172/**
1173 * gem_uses_full_ppgtt:
1174 * @fd: open i915 drm file descriptor
1175 *
1176 * Feature test macro to check whether the kernel internally uses full
1177 * per-process gtt to execute batches. Note that this is also true when we're
1178 * using full 64b ppgtt.
1179 *
1180 * Returns: Whether batches are run through full ppgtt.
1181 */
1182bool gem_uses_full_ppgtt(int fd)
1183{
1184 return gem_gtt_type(fd) > 1;
1185}
1186
1187/**
Michel Thierrybcb9d6f2017-06-28 11:36:54 -07001188 * gem_gpu_reset_type:
1189 * @fd: open i915 drm file descriptor
1190 *
1191 * Query whether reset-engine (2), global-reset (1) or reset-disable (0)
1192 * is available.
1193 *
1194 * Returns: GPU reset type available
1195 */
1196int gem_gpu_reset_type(int fd)
1197{
1198 struct drm_i915_getparam gp;
1199 int gpu_reset_type = -1;
1200
1201 memset(&gp, 0, sizeof(gp));
1202 gp.param = I915_PARAM_HAS_GPU_RESET;
1203 gp.value = &gpu_reset_type;
1204 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
1205
1206 return gpu_reset_type;
1207}
1208
1209/**
1210 * gem_gpu_reset_enabled:
1211 * @fd: open i915 drm file descriptor
1212 *
1213 * Feature test macro to check whether the kernel internally uses hangchecks
1214 * and can reset the GPU upon hang detection. Note that this is also true when
1215 * reset-engine (the lightweight, single engine reset) is available.
1216 *
1217 * Returns: Whether the driver will detect hangs and perform a reset.
1218 */
1219bool gem_gpu_reset_enabled(int fd)
1220{
1221 return gem_gpu_reset_type(fd) > 0;
1222}
1223
1224/**
1225 * gem_engine_reset_enabled:
1226 * @fd: open i915 drm file descriptor
1227 *
1228 * Feature test macro to check whether the kernel internally uses hangchecks
1229 * and can reset individual engines upon hang detection.
1230 *
1231 * Returns: Whether the driver will detect hangs and perform an engine reset.
1232 */
1233bool gem_engine_reset_enabled(int fd)
1234{
1235 return gem_gpu_reset_type(fd) > 1;
1236}
1237
1238/**
Thomas Woodae3a9462014-11-25 11:59:37 +00001239 * gem_available_fences:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001240 * @fd: open i915 drm file descriptor
1241 *
1242 * Feature test macro to query the kernel for the number of available fences
Thomas Wood519f3772014-09-26 14:24:52 +01001243 * usable in a batchbuffer. Only relevant for pre-gen4.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001244 *
1245 * Returns: The number of available fences.
1246 */
1247int gem_available_fences(int fd)
1248{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001249 static int num_fences = -1;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001250
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001251 if (num_fences < 0) {
1252 struct drm_i915_getparam gp;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001253
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001254 memset(&gp, 0, sizeof(gp));
1255 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
1256 gp.value = &num_fences;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001257
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001258 num_fences = 0;
1259 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1260 errno = 0;
1261 }
1262
1263 return num_fences;
Daniel Vetter556c49f2014-03-11 23:27:06 +01001264}
1265
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001266bool gem_has_llc(int fd)
1267{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001268 static int has_llc = -1;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001269
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001270 if (has_llc < 0) {
1271 struct drm_i915_getparam gp;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001272
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001273 memset(&gp, 0, sizeof(gp));
1274 gp.param = I915_PARAM_HAS_LLC;
1275 gp.value = &has_llc;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001276
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001277 has_llc = 0;
1278 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1279 errno = 0;
1280 }
1281
1282 return has_llc;
Chris Wilsonb76f1d82014-09-08 10:28:41 +01001283}
1284
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001285static bool has_param(int fd, int param)
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001286{
1287 drm_i915_getparam_t gp;
Chris Wilsonacca7242014-07-21 07:57:25 +01001288 int tmp = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001289
Chris Wilsonacca7242014-07-21 07:57:25 +01001290 memset(&gp, 0, sizeof(gp));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001291 gp.value = &tmp;
1292 gp.param = param;
1293
Chris Wilsonc1fed522016-03-19 13:00:29 +00001294 if (igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001295 return false;
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001296
1297 errno = 0;
1298 return tmp > 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001299}
1300
Daniel Vetter556c49f2014-03-11 23:27:06 +01001301/**
1302 * gem_has_bsd:
1303 * @fd: open i915 drm file descriptor
1304 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001305 * Feature test macro to query whether the BSD ring is available.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001306 *
1307 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
1308 *
Thomas Wood519f3772014-09-26 14:24:52 +01001309 * Returns: Whether the BSD ring is available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001310 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001311bool gem_has_bsd(int fd)
1312{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001313 static int has_bsd = -1;
1314 if (has_bsd < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001315 has_bsd = has_param(fd, I915_PARAM_HAS_BSD);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001316 return has_bsd;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001317}
1318
Daniel Vetter556c49f2014-03-11 23:27:06 +01001319/**
1320 * gem_has_blt:
1321 * @fd: open i915 drm file descriptor
1322 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001323 * Feature test macro to query whether the blitter ring is available.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001324 *
1325 * Note that recent Bspec calls this the BCS ring for Blitter Command Submission.
1326 *
Thomas Wood519f3772014-09-26 14:24:52 +01001327 * Returns: Whether the blitter ring is available or not.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001328 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001329bool gem_has_blt(int fd)
1330{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001331 static int has_blt = -1;
1332 if (has_blt < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001333 has_blt = has_param(fd, I915_PARAM_HAS_BLT);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001334 return has_blt;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001335}
1336
/* Defined locally: may be missing from older installed uapi headers. */
#define LOCAL_I915_PARAM_HAS_VEBOX 22
/**
 * gem_has_vebox:
 * @fd: open i915 drm file descriptor
 *
 * Feature test macro to query whether the vebox ring is available.
 *
 * Note that recent Bspec calls this the VECS ring for Video Enhancement Command
 * Submission.
 *
 * Returns: Whether the vebox ring is available or not.
 */
bool gem_has_vebox(int fd)
{
	/* Cached per process after the first probe. */
	static int has_vebox = -1;
	if (has_vebox < 0)
		has_vebox = has_param(fd, LOCAL_I915_PARAM_HAS_VEBOX);
	return has_vebox;
}
1356
Zhipeng Gong17937a02015-01-13 08:50:19 +08001357#define LOCAL_I915_PARAM_HAS_BSD2 31
1358/**
1359 * gem_has_bsd2:
1360 * @fd: open i915 drm file descriptor
1361 *
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001362 * Feature test macro to query whether the BSD2 ring is available.
Zhipeng Gong17937a02015-01-13 08:50:19 +08001363 *
1364 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
1365 *
1366 * Returns: Whether the BSD ring is avaible or not.
1367 */
1368bool gem_has_bsd2(int fd)
1369{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001370 static int has_bsd2 = -1;
1371 if (has_bsd2 < 0)
Chris Wilson3d9bcd02016-03-14 13:24:54 +00001372 has_bsd2 = has_param(fd, LOCAL_I915_PARAM_HAS_BSD2);
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001373 return has_bsd2;
Zhipeng Gong17937a02015-01-13 08:50:19 +08001374}
Ankitprasad Sharmabeef31a2016-06-06 14:52:42 +05301375
/* Local mirror of the kernel's extended GET_APERTURE payload; declared here
 * because the extra fields may be missing from installed uapi headers. */
struct local_i915_gem_get_aperture {
	__u64 aper_size;		/* total gtt address space size */
	__u64 aper_available_size;	/* size currently available for use */
	__u64 version;
	__u64 map_total_size;		/* total mappable aperture size */
	__u64 stolen_total_size;	/* total stolen memory size */
};
#define DRM_I915_GEM_GET_APERTURE 0x23
#define LOCAL_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct local_i915_gem_get_aperture)
1385/**
1386 * gem_total_mappable_size:
1387 * @fd: open i915 drm file descriptor
1388 *
1389 * Feature test macro to query the kernel for the total mappable size.
1390 *
1391 * Returns: Total mappable address space size.
1392 */
1393uint64_t gem_total_mappable_size(int fd)
1394{
1395 struct local_i915_gem_get_aperture aperture;
1396
1397 memset(&aperture, 0, sizeof(aperture));
1398 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1399
1400 return aperture.map_total_size;
1401}
1402
1403/**
1404 * gem_total_stolen_size:
1405 * @fd: open i915 drm file descriptor
1406 *
1407 * Feature test macro to query the kernel for the total stolen size.
1408 *
1409 * Returns: Total stolen memory.
1410 */
1411uint64_t gem_total_stolen_size(int fd)
1412{
1413 struct local_i915_gem_get_aperture aperture;
1414
1415 memset(&aperture, 0, sizeof(aperture));
1416 do_ioctl(fd, LOCAL_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1417
1418 return aperture.stolen_total_size;
1419}
1420
Chris Wilson16038902016-02-18 10:35:10 +00001421/**
1422 * gem_available_aperture_size:
1423 * @fd: open i915 drm file descriptor
1424 *
1425 * Feature test macro to query the kernel for the available gpu aperture size
1426 * usable in a batchbuffer.
1427 *
1428 * Returns: The available gtt address space size.
1429 */
1430uint64_t gem_available_aperture_size(int fd)
1431{
1432 struct drm_i915_gem_get_aperture aperture;
1433
1434 memset(&aperture, 0, sizeof(aperture));
1435 aperture.aper_size = 256*1024*1024;
1436 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1437
1438 return aperture.aper_available_size;
1439}
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001440
Daniel Vetter556c49f2014-03-11 23:27:06 +01001441/**
1442 * gem_aperture_size:
1443 * @fd: open i915 drm file descriptor
1444 *
1445 * Feature test macro to query the kernel for the total gpu aperture size.
1446 *
1447 * Returns: The total gtt address space size.
1448 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001449uint64_t gem_aperture_size(int fd)
1450{
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001451 static uint64_t aperture_size = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001452
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001453 if (aperture_size == 0) {
Chris Wilsona2271932015-10-14 14:17:55 +01001454 struct local_i915_gem_context_param p;
Chris Wilsonacca7242014-07-21 07:57:25 +01001455
Chris Wilsona2271932015-10-14 14:17:55 +01001456 memset(&p, 0, sizeof(p));
1457 p.param = 0x3;
1458 if (ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0) {
1459 aperture_size = p.value;
1460 } else {
1461 struct drm_i915_gem_get_aperture aperture;
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001462
Chris Wilsona2271932015-10-14 14:17:55 +01001463 memset(&aperture, 0, sizeof(aperture));
1464 aperture.aper_size = 256*1024*1024;
1465
1466 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1467 aperture_size = aperture.aper_size;
1468 }
Chris Wilsonfb950bc2015-04-13 19:04:13 +01001469 }
1470
1471 return aperture_size;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001472}
1473
Daniel Vetter556c49f2014-03-11 23:27:06 +01001474/**
Thomas Woodae3a9462014-11-25 11:59:37 +00001475 * gem_mappable_aperture_size:
Daniel Vetter556c49f2014-03-11 23:27:06 +01001476 *
1477 * Feature test macro to query the kernel for the mappable gpu aperture size.
Thomas Wood519f3772014-09-26 14:24:52 +01001478 * This is the area available for GTT memory mappings.
Daniel Vetter556c49f2014-03-11 23:27:06 +01001479 *
1480 * Returns: The mappable gtt address space size.
1481 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001482uint64_t gem_mappable_aperture_size(void)
1483{
Chris Wilsonacca7242014-07-21 07:57:25 +01001484 struct pci_device *pci_dev = intel_get_pci_device();
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001485 int bar;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001486
1487 if (intel_gen(pci_dev->device_id) < 3)
1488 bar = 0;
1489 else
1490 bar = 2;
1491
1492 return pci_dev->regions[bar].size;
1493}
1494
Chris Wilson391b32c2016-02-05 18:35:21 +00001495/**
1496 * gem_global_aperture_size:
Daniel Vetterded99582016-07-27 14:32:15 +02001497 * @fd: open i915 drm file descriptor
Chris Wilson391b32c2016-02-05 18:35:21 +00001498 *
1499 * Feature test macro to query the kernel for the global gpu aperture size.
1500 * This is the area available for the kernel to perform address translations.
1501 *
1502 * Returns: The mappable gtt address space size.
1503 */
1504uint64_t gem_global_aperture_size(int fd)
1505{
1506 struct drm_i915_gem_get_aperture aperture;
1507
1508 memset(&aperture, 0, sizeof(aperture));
1509 aperture.aper_size = 256*1024*1024;
1510 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1511
1512 return aperture.aper_size;
1513}
1514
Michał Winiarskie6ca4bd2016-01-25 19:35:02 +01001515#define LOCAL_I915_PARAM_HAS_EXEC_SOFTPIN 37
1516/**
1517 * gem_has_softpin:
1518 * @fd: open i915 drm file descriptor
1519 *
1520 * Feature test macro to query whether the softpinning functionality is
1521 * supported.
1522 *
1523 * Returns: Whether softpin support is available
1524 */
1525bool gem_has_softpin(int fd)
1526{
1527 static int has_softpin = -1;
1528
1529 if (has_softpin < 0) {
1530 struct drm_i915_getparam gp;
1531
1532 memset(&gp, 0, sizeof(gp));
1533 gp.param = LOCAL_I915_PARAM_HAS_EXEC_SOFTPIN;
1534 gp.value = &has_softpin;
1535
1536 has_softpin = 0;
1537 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1538 errno = 0;
1539 }
1540
1541 return has_softpin;
1542}
1543
Daniele Ceraolo Spurio25b5a742017-02-03 14:45:30 -08001544#define LOCAL_PARAM_HAS_EXEC_FENCE 44
1545/**
1546 * gem_has_exec_fence:
1547 * @fd: open i915 drm file descriptor
1548 *
1549 * Feature test macro to query whether in/out fence support in execbuffer is
1550 * available.
1551 *
1552 * Returns: Whether fence support is available
1553 */
1554bool gem_has_exec_fence(int fd)
1555{
1556 static int has_exec_fence = -1;
1557
1558 if (has_exec_fence < 0) {
1559 struct drm_i915_getparam gp;
1560
1561 memset(&gp, 0, sizeof(gp));
1562 gp.param = LOCAL_PARAM_HAS_EXEC_FENCE;
1563 gp.value = &has_exec_fence;
1564
1565 has_exec_fence = 0;
1566 ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
1567 errno = 0;
1568 }
1569
1570 return has_exec_fence;
1571}
1572
Daniel Vetter556c49f2014-03-11 23:27:06 +01001573/**
1574 * gem_require_caching:
1575 * @fd: open i915 drm file descriptor
1576 *
1577 * Feature test macro to query whether buffer object caching control is
1578 * available. Automatically skips through igt_require() if not.
1579 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001580void gem_require_caching(int fd)
1581{
Chris Wilson95090bb2016-03-18 11:49:23 +00001582 uint32_t handle;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001583
Chris Wilson95090bb2016-03-18 11:49:23 +00001584 handle = gem_create(fd, 4096);
1585 gem_set_caching(fd, handle, 0);
1586 gem_close(fd, handle);
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001587
Chris Wilsonb918a3b2014-04-25 07:40:34 +01001588 errno = 0;
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001589}
1590
Chris Wilson9518cb52017-02-22 15:24:54 +00001591void igt_require_gem(int fd)
1592{
Chris Wilson406bb362017-08-18 11:40:50 +01001593 char path[256];
Chris Wilson9518cb52017-02-22 15:24:54 +00001594 int err;
1595
Chris Wilson673bf762017-03-16 22:46:30 +00001596 igt_require_intel(fd);
1597
Chris Wilson406bb362017-08-18 11:40:50 +01001598 /* We only want to use the throttle-ioctl for its -EIO reporting
1599 * of a wedged device, not for actually waiting on outstanding
1600 * requests! So create a new drm_file for the device that is clean.
1601 */
1602 snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
1603 fd = open(path, O_RDWR);
1604 igt_assert_lte(0, fd);
1605
Chris Wilson9518cb52017-02-22 15:24:54 +00001606 err = 0;
1607 if (ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE))
1608 err = -errno;
Chris Wilson406bb362017-08-18 11:40:50 +01001609 close(fd);
Chris Wilson9518cb52017-02-22 15:24:54 +00001610
1611 igt_require_f(err == 0, "Unresponsive i915/GEM device\n");
1612}
1613
Chris Wilson60eafc52016-03-04 09:40:51 +00001614bool gem_has_ring(int fd, unsigned ring)
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001615{
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001616 struct drm_i915_gem_execbuffer2 execbuf;
1617 struct drm_i915_gem_exec_object2 exec;
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001618
Chris Wilson60eafc52016-03-04 09:40:51 +00001619 /* silly ABI, the kernel thinks everyone who has BSD also has BSD2 */
1620 if ((ring & ~(3<<13)) == I915_EXEC_BSD) {
1621 if (ring & (3 << 13) && !gem_has_bsd2(fd))
1622 return false;
1623 }
1624
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001625 memset(&exec, 0, sizeof(exec));
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001626 memset(&execbuf, 0, sizeof(execbuf));
Chris Wilson39858a12017-01-02 11:05:21 +00001627 execbuf.buffers_ptr = to_user_pointer(&exec);
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001628 execbuf.buffer_count = 1;
1629 execbuf.flags = ring;
Chris Wilson9ba9af22016-03-04 20:38:16 +00001630 return __gem_execbuf(fd, &execbuf) == -ENOENT;
Chris Wilsonb7f150b2016-01-27 14:30:24 +00001631}
1632
/**
 * gem_require_ring:
 * @fd: open i915 drm file descriptor
 * @ring: ring flag bit as used in gem_execbuf()
 *
 * Feature test macro to query whether a specific ring is available,
 * automagically skipping through igt_require() when it is not.
 */
void gem_require_ring(int fd, unsigned ring)
{
	bool available = gem_has_ring(fd, ring);

	igt_require(available);
}
1646
/**
 * gem_has_mocs_registers:
 * @fd: open i915 drm file descriptor
 *
 * Feature test macro to query whether the device has MOCS registers.
 * These exist gen 9+.
 *
 * Returns: true when the device is gen 9 or newer.
 */
bool gem_has_mocs_registers(int fd)
{
	const uint32_t devid = intel_get_drm_devid(fd);

	return intel_gen(devid) >= 9;
}
1658
/**
 * gem_require_mocs_registers:
 * @fd: open i915 drm file descriptor
 *
 * Feature test macro requiring MOCS registers (gen 9+), automagically
 * skipping through igt_require() when they are absent.
 */
void gem_require_mocs_registers(int fd)
{
	bool has_mocs = gem_has_mocs_registers(fd);

	igt_require(has_mocs);
}
1670
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001671/* prime */
Daniel Vetter556c49f2014-03-11 23:27:06 +01001672
1673/**
1674 * prime_handle_to_fd:
1675 * @fd: open i915 drm file descriptor
1676 * @handle: file-private gem buffer object handle
1677 *
1678 * This wraps the PRIME_HANDLE_TO_FD ioctl, which is used to export a gem buffer
1679 * object into a global (i.e. potentially cross-device) dma-buf file-descriptor
1680 * handle.
1681 *
1682 * Returns: The created dma-buf fd handle.
1683 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001684int prime_handle_to_fd(int fd, uint32_t handle)
1685{
1686 struct drm_prime_handle args;
1687
Chris Wilsonacca7242014-07-21 07:57:25 +01001688 memset(&args, 0, sizeof(args));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001689 args.handle = handle;
1690 args.flags = DRM_CLOEXEC;
1691 args.fd = -1;
1692
1693 do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
1694
1695 return args.fd;
1696}
1697
Daniel Vetter556c49f2014-03-11 23:27:06 +01001698/**
Tiago Vignatti4edfa092015-07-29 18:26:29 -03001699 * prime_handle_to_fd_for_mmap:
1700 * @fd: open i915 drm file descriptor
1701 * @handle: file-private gem buffer object handle
1702 *
1703 * Same as prime_handle_to_fd above but with DRM_RDWR capabilities, which can
1704 * be useful for writing into the mmap'ed dma-buf file-descriptor.
1705 *
1706 * Returns: The created dma-buf fd handle or -1 if the ioctl fails.
1707 */
1708int prime_handle_to_fd_for_mmap(int fd, uint32_t handle)
1709{
1710 struct drm_prime_handle args;
1711
1712 memset(&args, 0, sizeof(args));
1713 args.handle = handle;
1714 args.flags = DRM_CLOEXEC | DRM_RDWR;
1715 args.fd = -1;
1716
Chris Wilsonc1fed522016-03-19 13:00:29 +00001717 if (igt_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0)
Tiago Vignatti4edfa092015-07-29 18:26:29 -03001718 return -1;
1719
1720 return args.fd;
1721}
1722
1723/**
Daniel Vetter556c49f2014-03-11 23:27:06 +01001724 * prime_fd_to_handle:
1725 * @fd: open i915 drm file descriptor
1726 * @dma_buf_fd: dma-buf fd handle
1727 *
1728 * This wraps the PRIME_FD_TO_HANDLE ioctl, which is used to import a dma-buf
1729 * file-descriptor into a gem buffer object.
1730 *
1731 * Returns: The created gem buffer object handle.
1732 */
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001733uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
1734{
1735 struct drm_prime_handle args;
1736
Chris Wilsonacca7242014-07-21 07:57:25 +01001737 memset(&args, 0, sizeof(args));
Daniel Vetter766c5bc2014-03-11 22:58:07 +01001738 args.fd = dma_buf_fd;
1739 args.flags = 0;
1740 args.handle = 0;
1741
1742 do_ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
1743
1744 return args.handle;
1745}
1746
/**
 * prime_get_size:
 * @dma_buf_fd: dma-buf fd handle
 *
 * Wraps the lseek() protocol used to query the invariant size of a
 * dma-buf. Not all kernels support this, which is checked with
 * igt_require() and so results in automagic test skipping.
 *
 * Returns: The lifetime-invariant size of the dma-buf object.
 */
off_t prime_get_size(int dma_buf_fd)
{
	off_t size = lseek(dma_buf_fd, 0, SEEK_END);

	/* ESPIPE means the kernel lacks dma-buf lseek support: skip then,
	 * but treat any other failure as a real error. */
	igt_assert(size >= 0 || errno == ESPIPE);
	igt_require(size >= 0);

	/* Don't leak a stale errno from the probe to the caller. */
	errno = 0;

	return size;
}
Tvrtko Ursulin0a087732015-03-03 14:10:54 +00001768
1769/**
Tiago Vignatti35debab2015-12-11 18:50:35 -02001770 * prime_sync_start
1771 * @dma_buf_fd: dma-buf fd handle
Daniel Vetterded99582016-07-27 14:32:15 +02001772 * @write: read/write or read-only access
1773 *
1774 * Must be called before starting CPU mmap access to a dma-buf.
Tiago Vignatti35debab2015-12-11 18:50:35 -02001775 */
Chris Wilsonaed69b52016-02-25 21:43:01 +00001776void prime_sync_start(int dma_buf_fd, bool write)
Tiago Vignatti35debab2015-12-11 18:50:35 -02001777{
1778 struct local_dma_buf_sync sync_start;
1779
1780 memset(&sync_start, 0, sizeof(sync_start));
Chris Wilsonaed69b52016-02-25 21:43:01 +00001781 sync_start.flags = LOCAL_DMA_BUF_SYNC_START;
1782 sync_start.flags |= LOCAL_DMA_BUF_SYNC_READ;
1783 if (write)
1784 sync_start.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
Tiago Vignatti35debab2015-12-11 18:50:35 -02001785 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_start);
1786}
1787
1788/**
1789 * prime_sync_end
1790 * @dma_buf_fd: dma-buf fd handle
Daniel Vetterded99582016-07-27 14:32:15 +02001791 * @write: read/write or read-only access
1792 *
1793 * Must be called after finishing CPU mmap access to a dma-buf.
Tiago Vignatti35debab2015-12-11 18:50:35 -02001794 */
Chris Wilsonaed69b52016-02-25 21:43:01 +00001795void prime_sync_end(int dma_buf_fd, bool write)
Tiago Vignatti35debab2015-12-11 18:50:35 -02001796{
1797 struct local_dma_buf_sync sync_end;
1798
1799 memset(&sync_end, 0, sizeof(sync_end));
Chris Wilsonaed69b52016-02-25 21:43:01 +00001800 sync_end.flags = LOCAL_DMA_BUF_SYNC_END;
1801 sync_end.flags |= LOCAL_DMA_BUF_SYNC_READ;
1802 if (write)
1803 sync_end.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
Tiago Vignatti35debab2015-12-11 18:50:35 -02001804 do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_end);
1805}
1806
1807/**
Tvrtko Ursulin0a087732015-03-03 14:10:54 +00001808 * igt_require_fb_modifiers:
1809 * @fd: Open DRM file descriptor.
1810 *
1811 * Requires presence of DRM_CAP_ADDFB2_MODIFIERS.
1812 */
1813void igt_require_fb_modifiers(int fd)
1814{
1815 static bool has_modifiers, cap_modifiers_tested;
1816
1817 if (!cap_modifiers_tested) {
1818 uint64_t cap_modifiers;
1819 int ret;
1820
1821 ret = drmGetCap(fd, LOCAL_DRM_CAP_ADDFB2_MODIFIERS, &cap_modifiers);
1822 igt_assert(ret == 0 || errno == EINVAL);
1823 has_modifiers = ret == 0 && cap_modifiers == 1;
1824 cap_modifiers_tested = true;
1825 }
1826
1827 igt_require(has_modifiers);
1828}
Tvrtko Ursulinc7bac3c2015-03-03 14:11:02 +00001829
1830int __kms_addfb(int fd, uint32_t handle, uint32_t width, uint32_t height,
1831 uint32_t stride, uint32_t pixel_format, uint64_t modifier,
1832 uint32_t flags, uint32_t *buf_id)
1833{
1834 struct local_drm_mode_fb_cmd2 f;
1835 int ret;
1836
1837 igt_require_fb_modifiers(fd);
1838
1839 memset(&f, 0, sizeof(f));
1840
1841 f.width = width;
1842 f.height = height;
1843 f.pixel_format = pixel_format;
1844 f.flags = flags;
1845 f.handles[0] = handle;
1846 f.pitches[0] = stride;
1847 f.modifier[0] = modifier;
1848
Chris Wilsonc1fed522016-03-19 13:00:29 +00001849 ret = igt_ioctl(fd, LOCAL_DRM_IOCTL_MODE_ADDFB2, &f);
Tvrtko Ursulinc7bac3c2015-03-03 14:11:02 +00001850
1851 *buf_id = f.fb_id;
1852
1853 return ret < 0 ? -errno : ret;
1854}