/*
 * Copyright © 2009,2012,2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

/** @file gem_concurrent.c
 *
 * This is a test of pread/pwrite/mmap behavior when writing to active
 * buffers.
 *
 * Based on gem_gtt_concurrent_blt.
 */

#include "igt.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>

#include <drm.h>

#include "intel_bufmgr.h"

IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
		     " buffers.");

int fd, devid, gen;
int all;
int pass;

struct create {
	const char *name;
	void (*require)(const struct create *, unsigned);
	drm_intel_bo *(*create)(drm_intel_bufmgr *, uint64_t size);
};

struct size {
	const char *name;
	int width, height;
};

struct buffers {
	const char *name;
	const struct create *create;
	const struct access_mode *mode;
	const struct size *size;
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	drm_intel_bo **src, **dst;
	drm_intel_bo *snoop, *spare;
	uint32_t *tmp;
	int width, height, npixels;
	int count, num_buffers;
};

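/*
 * Test fixture: a struct buffers combines one backing-store allocator
 * (struct create), one CPU access method (struct access_mode, defined
 * further below) and one surface size.  src[]/dst[] hold the working set
 * of num_buffers pairs, spare is an extra buffer used for chained copies,
 * snoop is a cache-coherent buffer used for readback, and tmp is a CPU
 * staging array of npixels dwords.
 */
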
#define MIN_BUFFERS 3

static void blt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src);

static void
nop_release_bo(drm_intel_bo *bo)
{
	drm_intel_bo_unreference(bo);
}

static void
prw_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	for (int i = 0; i < b->npixels; i++)
		b->tmp[i] = val;
	drm_intel_bo_subdata(bo, 0, 4*b->npixels, b->tmp);
}

static void
prw_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	uint32_t *vaddr;

	vaddr = b->tmp;
	do_or_die(drm_intel_bo_get_subdata(bo, 0, 4*b->npixels, vaddr));
	for (int i = 0; i < b->npixels; i++)
		igt_assert_eq_u32(vaddr[i], val);
}

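/*
 * The partial accessors touch only one dword per row, at a column offset
 * by the global "pass" counter, so successive passes of a test walk a
 * shifting diagonal across the surface instead of always rewriting the
 * same locations.
 */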
#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))

static void
partial_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	for (int y = 0; y < b->height; y++)
		do_or_die(drm_intel_bo_subdata(bo, 4*pixel(y, b->width), 4, &val));
}

static void
partial_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	for (int y = 0; y < b->height; y++) {
		uint32_t buf;
		do_or_die(drm_intel_bo_get_subdata(bo, 4*pixel(y, b->width), 4, &buf));
		igt_assert_eq_u32(buf, val);
	}
}

static drm_intel_bo *
create_normal_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
	drm_intel_bo *bo;

	bo = drm_intel_bo_alloc(bufmgr, "bo", size, 0);
	igt_assert(bo);

	return bo;
}

static void can_create_normal(const struct create *create, unsigned count)
{
}

#if HAVE_CREATE_PRIVATE
static drm_intel_bo *
create_private_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
	drm_intel_bo *bo;
	uint32_t handle;

	/* XXX gem_create_with_flags(fd, size, I915_CREATE_PRIVATE); */

	handle = gem_create(fd, size);
	bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
	gem_close(fd, handle);

	return bo;
}

static void can_create_private(const struct create *create, unsigned count)
{
	igt_require(0);
}
#endif

#if HAVE_CREATE_STOLEN
static drm_intel_bo *
create_stolen_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
	drm_intel_bo *bo;
	uint32_t handle;

	/* XXX gem_create_with_flags(fd, size, I915_CREATE_STOLEN); */

	handle = gem_create(fd, size);
	bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
	gem_close(fd, handle);

	return bo;
}

static void can_create_stolen(const struct create *create, unsigned count)
{
	/* XXX check num_buffers against available stolen */
	igt_require(0);
}
#endif

static void create_cpu_require(const struct create *create, unsigned count)
{
#if HAVE_CREATE_STOLEN
	igt_require(create->create != create_stolen_bo);
#endif
}

static drm_intel_bo *
unmapped_create_bo(const struct buffers *b)
{
	return b->create->create(b->bufmgr, 4*b->npixels);
}

static void create_snoop_require(const struct create *create, unsigned count)
{
	create_cpu_require(create, count);
	igt_require(!gem_has_llc(fd));
}

static drm_intel_bo *
snoop_create_bo(const struct buffers *b)
{
	drm_intel_bo *bo;

	bo = unmapped_create_bo(b);
	gem_set_caching(fd, bo->handle, I915_CACHING_CACHED);
	drm_intel_bo_disable_reuse(bo);

	return bo;
}

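/*
 * Userptr support is probed by first issuing the ioctl with a bogus,
 * wrapping user address: a kernel that understands the ioctl rejects it
 * with EFAULT, and only then is a properly aligned allocation handed in
 * to confirm the call really succeeds.  The result is cached in a static
 * so the probe runs only once.
 */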
static void create_userptr_require(const struct create *create, unsigned count)
{
	static int has_userptr = -1;
	if (has_userptr < 0) {
		struct drm_i915_gem_userptr arg;

		has_userptr = 0;

		memset(&arg, 0, sizeof(arg));
		arg.user_ptr = -4096ULL;
		arg.user_size = 8192;
		errno = 0;
		drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg);
		if (errno == EFAULT) {
			igt_assert(posix_memalign((void **)&arg.user_ptr,
						  4096, arg.user_size) == 0);
			has_userptr = drmIoctl(fd,
					       LOCAL_IOCTL_I915_GEM_USERPTR,
					       &arg) == 0;
			free((void *)(uintptr_t)arg.user_ptr);
		}

	}
	igt_require(has_userptr);
}

static drm_intel_bo *
userptr_create_bo(const struct buffers *b)
{
	struct local_i915_gem_userptr userptr;
	drm_intel_bo *bo;
	void *ptr;

	memset(&userptr, 0, sizeof(userptr));
	userptr.user_size = b->npixels * 4;
	userptr.user_size = (userptr.user_size + 4095) & -4096;

	ptr = mmap(NULL, userptr.user_size,
		   PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
	igt_assert(ptr != (void *)-1);
	userptr.user_ptr = (uintptr_t)ptr;

#if 0
	do_or_die(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr));
	bo = gem_handle_to_libdrm_bo(b->bufmgr, fd, "userptr", userptr.handle);
	gem_close(fd, userptr.handle);
#else
	bo = drm_intel_bo_alloc_userptr(b->bufmgr, "name",
					ptr, I915_TILING_NONE, 0,
					userptr.user_size, 0);
	igt_assert(bo);
#endif
	bo->virtual = (void *)(uintptr_t)userptr.user_ptr;

	return bo;
}

static void
userptr_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	int size = b->npixels;
	uint32_t *vaddr = bo->virtual;

	gem_set_domain(fd, bo->handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	while (size--)
		*vaddr++ = val;
}

static void
userptr_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	int size = b->npixels;
	uint32_t *vaddr = bo->virtual;

	gem_set_domain(fd, bo->handle,
		       I915_GEM_DOMAIN_CPU, 0);
	while (size--)
		igt_assert_eq_u32(*vaddr++, val);
}

static void
userptr_release_bo(drm_intel_bo *bo)
{
	igt_assert(bo->virtual);

	munmap(bo->virtual, bo->size);
	bo->virtual = NULL;

	drm_intel_bo_unreference(bo);
}

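/*
 * dma-buf support is probed by exporting a small bo with DRM_RDWR and
 * checking that the returned fd can be mmapped.  Each source and
 * destination buffer later holds its own prime fd, hence the
 * file-descriptor requirement of roughly two per buffer pair.
 */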
static void create_dmabuf_require(const struct create *create, unsigned count)
{
	static int has_dmabuf = -1;
	if (has_dmabuf < 0) {
		struct drm_prime_handle args;
		void *ptr;

		memset(&args, 0, sizeof(args));
		args.handle = gem_create(fd, 4096);
		args.flags = DRM_RDWR;
		args.fd = -1;

		drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
		gem_close(fd, args.handle);

		has_dmabuf = 0;
		ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, args.fd, 0);
		if (ptr != MAP_FAILED) {
			has_dmabuf = 1;
			munmap(ptr, 4096);
		}

		close(args.fd);
	}
	igt_require(has_dmabuf);
	intel_require_files(2*count);
}

struct dmabuf {
	int fd;
	void *map;
};

static drm_intel_bo *
dmabuf_create_bo(const struct buffers *b)
{
	struct drm_prime_handle args;
	drm_intel_bo *bo;
	struct dmabuf *dmabuf;
	int size;

	size = 4*b->npixels;
	size = (size + 4095) & -4096;

	memset(&args, 0, sizeof(args));
	args.handle = gem_create(fd, size);
	args.flags = DRM_RDWR;
	args.fd = -1;

	do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
	gem_close(fd, args.handle);

	bo = drm_intel_bo_gem_create_from_prime(b->bufmgr, args.fd, size);
	igt_assert(bo);

	dmabuf = malloc(sizeof(*dmabuf));
	igt_assert(dmabuf);

	dmabuf->fd = args.fd;
	dmabuf->map = mmap(NULL, size,
			   PROT_READ | PROT_WRITE, MAP_SHARED,
			   dmabuf->fd, 0);
	igt_assert(dmabuf->map != (void *)-1);

	bo->virtual = dmabuf;

	return bo;
}

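/*
 * CPU access to the dma-buf mapping is bracketed with IGT's
 * prime_sync_start()/prime_sync_end() helpers so that the writes and
 * reads below are synchronised with any GPU access to the underlying
 * pages.
 */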
static void
dmabuf_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	struct dmabuf *dmabuf = bo->virtual;
	uint32_t *v;
	int size;

	prime_sync_start(dmabuf->fd, true);
	for (v = dmabuf->map, size = b->npixels; size--; v++)
		*v = val;
	prime_sync_end(dmabuf->fd, true);
}

static void
dmabuf_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	struct dmabuf *dmabuf = bo->virtual;
	uint32_t *v;
	int size;

	prime_sync_start(dmabuf->fd, false);
	for (v = dmabuf->map, size = b->npixels; size--; v++)
		igt_assert_eq_u32(*v, val);
	prime_sync_end(dmabuf->fd, false);
}

static void
dmabuf_release_bo(drm_intel_bo *bo)
{
	struct dmabuf *dmabuf = bo->virtual;
	igt_assert(dmabuf);

	munmap(dmabuf->map, bo->size);
	close(dmabuf->fd);
	free(dmabuf);

	bo->virtual = NULL;
	drm_intel_bo_unreference(bo);
}

static void
gtt_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	uint32_t *vaddr = bo->virtual;
	int size = b->npixels;

	drm_intel_gem_bo_start_gtt_access(bo, true);
	while (size--)
		*vaddr++ = val;
}

static void
gtt_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	uint32_t *vaddr = bo->virtual;

	/* GTT access is slow. So we just compare a few points */
	drm_intel_gem_bo_start_gtt_access(bo, false);
	for (int y = 0; y < b->height; y++)
		igt_assert_eq_u32(vaddr[pixel(y, b->width)], val);
}

static drm_intel_bo *
map_bo(drm_intel_bo *bo)
{
	/* gtt map doesn't have a write parameter, so just keep the mapping
	 * around (to avoid the set_domain with the gtt write domain set) and
	 * manually tell the kernel when we start accessing the gtt. */
	do_or_die(drm_intel_gem_bo_map_gtt(bo));

	return bo;
}

static drm_intel_bo *
tile_bo(drm_intel_bo *bo, int width)
{
	uint32_t tiling = I915_TILING_X;
	uint32_t stride = width * 4;

	do_or_die(drm_intel_bo_set_tiling(bo, &tiling, stride));

	return bo;
}

static drm_intel_bo *
gtt_create_bo(const struct buffers *b)
{
	return map_bo(unmapped_create_bo(b));
}

static drm_intel_bo *
gttX_create_bo(const struct buffers *b)
{
	return tile_bo(gtt_create_bo(b), b->width);
}

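/*
 * Reading tiled buffers back through a linear CPU mapping is only
 * reliable when the swizzle mode reported to userspace matches the one
 * applied to the physical pages; the locally defined GET_TILING2 exposes
 * phys_swizzle_mode for that comparison, and the WC tests are skipped
 * when the two differ (i.e. on machines with bit17 swizzling).
 */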
static void bit17_require(void)
{
	static struct drm_i915_gem_get_tiling2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} arg;
#define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)

	if (arg.handle == 0) {
		arg.handle = gem_create(fd, 4096);
		gem_set_tiling(fd, arg.handle, I915_TILING_X, 512);

		do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg);
		gem_close(fd, arg.handle);
	}
	igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);
}

static void wc_require(void)
{
	bit17_require();
	gem_require_mmap_wc(fd);
}

static void
wc_create_require(const struct create *create, unsigned count)
{
	wc_require();
}

static drm_intel_bo *
wc_create_bo(const struct buffers *b)
{
	drm_intel_bo *bo;

	bo = unmapped_create_bo(b);
	bo->virtual = gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
	return bo;
}

static void
wc_release_bo(drm_intel_bo *bo)
{
	igt_assert(bo->virtual);

	munmap(bo->virtual, bo->size);
	bo->virtual = NULL;

	nop_release_bo(bo);
}

static drm_intel_bo *
gpu_create_bo(const struct buffers *b)
{
	return unmapped_create_bo(b);
}

static drm_intel_bo *
gpuX_create_bo(const struct buffers *b)
{
	return tile_bo(gpu_create_bo(b), b->width);
}

static void
cpu_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	int size = b->npixels;
	uint32_t *vaddr;

	do_or_die(drm_intel_bo_map(bo, true));
	vaddr = bo->virtual;
	while (size--)
		*vaddr++ = val;
	drm_intel_bo_unmap(bo);
}

static void
cpu_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	int size = b->npixels;
	uint32_t *vaddr;

	do_or_die(drm_intel_bo_map(bo, false));
	vaddr = bo->virtual;
	while (size--)
		igt_assert_eq_u32(*vaddr++, val);
	drm_intel_bo_unmap(bo);
}

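/*
 * Fill the target bo from the GPU itself: a small XY_COLOR_BLT batch is
 * assembled by hand and submitted through EXECBUFFER2 (with a relocation
 * pointing at the target), so the write is performed by the blitter
 * rather than through any CPU mapping.
 */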
static void
gpu_set_bo(struct buffers *buffers, drm_intel_bo *bo, uint32_t val)
{
	struct drm_i915_gem_relocation_entry reloc[1];
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t buf[10], *b;
	uint32_t tiling, swizzle;

	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

	memset(reloc, 0, sizeof(reloc));
	memset(gem_exec, 0, sizeof(gem_exec));
	memset(&execbuf, 0, sizeof(execbuf));

	b = buf;
	*b++ = XY_COLOR_BLT_CMD_NOLEN |
		((gen >= 8) ? 5 : 4) |
		COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
	if (gen >= 4 && tiling) {
		b[-1] |= XY_COLOR_BLT_TILED;
		*b = buffers->width;
	} else
		*b = buffers->width << 2;
	*b++ |= 0xf0 << 16 | 1 << 25 | 1 << 24;
	*b++ = 0;
	*b++ = buffers->height << 16 | buffers->width;
	reloc[0].offset = (b - buf) * sizeof(uint32_t);
	reloc[0].target_handle = bo->handle;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
	*b++ = 0;
	if (gen >= 8)
		*b++ = 0;
	*b++ = val;
	*b++ = MI_BATCH_BUFFER_END;
	if ((b - buf) & 1)
		*b++ = 0;

	gem_exec[0].handle = bo->handle;
	gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;

	gem_exec[1].handle = gem_create(fd, 4096);
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)reloc;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(buf[0]);
	if (gen >= 6)
		execbuf.flags = I915_EXEC_BLT;

	gem_write(fd, gem_exec[1].handle, 0, buf, execbuf.batch_len);
	gem_execbuf(fd, &execbuf);

	gem_close(fd, gem_exec[1].handle);
}

static void
gpu_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
{
	blt_copy_bo(b, b->snoop, bo);
	cpu_cmp_bo(b, b->snoop, val);
}

struct access_mode {
	const char *name;
	void (*require)(const struct create *, unsigned);
	drm_intel_bo *(*create_bo)(const struct buffers *b);
	void (*set_bo)(struct buffers *b, drm_intel_bo *bo, uint32_t val);
	void (*cmp_bo)(struct buffers *b, drm_intel_bo *bo, uint32_t val);
	void (*release_bo)(drm_intel_bo *bo);
};
igt_render_copyfunc_t rendercopy;

static int read_sysctl(const char *path)
{
	FILE *file = fopen(path, "r");
	int max = 0;
	if (file) {
		if (fscanf(file, "%d", &max) != 1)
			max = 0; /* silence! */
		fclose(file);
	}
	return max;
}

static int write_sysctl(const char *path, int value)
{
	FILE *file = fopen(path, "w");
	if (file) {
		fprintf(file, "%d", value);
		fclose(file);
	}
	return read_sysctl(path);
}

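/*
 * Every mapped buffer costs at least one VMA in the test process, so for
 * large working sets vm.max_map_count is bumped (when we have the
 * privilege to do so) and the helper reports whether the resulting limit
 * can cover the requested number of buffers.
 */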
static bool set_max_map_count(int num_buffers)
{
	int max = read_sysctl("/proc/sys/vm/max_map_count");
	if (max < num_buffers + 1024)
		max = write_sysctl("/proc/sys/vm/max_map_count",
				   num_buffers + 1024);
	return max > num_buffers;
}

static void buffers_init(struct buffers *b,
			 const char *name,
			 const struct create *create,
			 const struct access_mode *mode,
			 const struct size *size,
			 int num_buffers,
			 int _fd, int enable_reuse)
{
	memset(b, 0, sizeof(*b));
	b->name = name;
	b->create = create;
	b->mode = mode;
	b->size = size;
	b->num_buffers = num_buffers;
	b->count = 0;

	b->width = size->width;
	b->height = size->height;
	b->npixels = size->width * size->height;
	b->tmp = malloc(4*b->npixels);
	igt_assert(b->tmp);

	b->bufmgr = drm_intel_bufmgr_gem_init(_fd, 4096);
	igt_assert(b->bufmgr);

	b->src = malloc(2*sizeof(drm_intel_bo *)*num_buffers);
	igt_assert(b->src);
	b->dst = b->src + num_buffers;

	if (enable_reuse)
		drm_intel_bufmgr_gem_enable_reuse(b->bufmgr);
	b->batch = intel_batchbuffer_alloc(b->bufmgr, devid);
	igt_assert(b->batch);
}

static void buffers_destroy(struct buffers *b)
{
	int count = b->count;
	if (count == 0)
		return;

	/* Be safe so that we can clean up a partial creation */
	b->count = 0;
	for (int i = 0; i < count; i++) {
		if (b->src[i]) {
			b->mode->release_bo(b->src[i]);
			b->src[i] = NULL;
		} else
			break;

		if (b->dst[i]) {
			b->mode->release_bo(b->dst[i]);
			b->dst[i] = NULL;
		}
	}
	if (b->snoop) {
		nop_release_bo(b->snoop);
		b->snoop = NULL;
	}
	if (b->spare) {
		b->mode->release_bo(b->spare);
		b->spare = NULL;
	}
}

static void buffers_create(struct buffers *b)
{
	int count = b->num_buffers;
	igt_assert(b->bufmgr);

	buffers_destroy(b);
	igt_assert(b->count == 0);
	b->count = count;

	for (int i = 0; i < count; i++) {
		b->src[i] = b->mode->create_bo(b);
		b->dst[i] = b->mode->create_bo(b);
	}
	b->spare = b->mode->create_bo(b);
	b->snoop = snoop_create_bo(b);
}

static void buffers_reset(struct buffers *b, bool enable_reuse)
{
	buffers_destroy(b);

	igt_assert(b->count == 0);
	igt_assert(b->tmp);
	igt_assert(b->src);
	igt_assert(b->dst);

	intel_batchbuffer_free(b->batch);
	drm_intel_bufmgr_destroy(b->bufmgr);

	b->bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	igt_assert(b->bufmgr);

	if (enable_reuse)
		drm_intel_bufmgr_gem_enable_reuse(b->bufmgr);
	b->batch = intel_batchbuffer_alloc(b->bufmgr, devid);
	igt_assert(b->batch);
}

static void buffers_fini(struct buffers *b)
{
	if (b->bufmgr == NULL)
		return;

	buffers_destroy(b);

	free(b->tmp);
	free(b->src);

	intel_batchbuffer_free(b->batch);
	drm_intel_bufmgr_destroy(b->bufmgr);

	memset(b, 0, sizeof(*b));
}

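/*
 * Every scenario below is parameterised twice over: by a do_copy
 * implementation (render, blitter, CPU, GTT or WC memcpy) that moves
 * data between buffers, and by a do_hang injector (none, blitter ring,
 * render ring, or all engines) that forces a GPU reset in the middle of
 * the test.
 */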
typedef void (*do_copy)(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src);
typedef struct igt_hang_ring (*do_hang)(void);

static void render_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
	struct igt_buf d = {
		.bo = dst,
		.size = b->npixels * 4,
		.num_tiles = b->npixels * 4,
		.stride = b->width * 4,
	}, s = {
		.bo = src,
		.size = b->npixels * 4,
		.num_tiles = b->npixels * 4,
		.stride = b->width * 4,
	};
	uint32_t swizzle;

	drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
	drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);

	rendercopy(b->batch, NULL,
		   &s, 0, 0,
		   b->width, b->height,
		   &d, 0, 0);
}

static void blt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
	intel_blt_copy(b->batch,
		       src, 0, 0, 4*b->width,
		       dst, 0, 0, 4*b->width,
		       b->width, b->height, 32);
}

static void cpu_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = b->npixels * sizeof(uint32_t);
	void *d, *s;

	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
	d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}

static void gtt_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = b->npixels * sizeof(uint32_t);
	void *d, *s;

	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
	d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}

static void wc_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = b->npixels * sizeof(uint32_t);
	void *d, *s;

	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
	d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}

static struct igt_hang_ring no_hang(void)
{
	return (struct igt_hang_ring){0, 0};
}

static struct igt_hang_ring bcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_BLT);
}

static struct igt_hang_ring rcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_RENDER);
}

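/*
 * Arm a recoverable hang on every engine: each engine gets a hanging
 * batch from igt_hang_ring() chased by a trivial nop batch, and the nop
 * batch handle is returned in the hang descriptor so that the eventual
 * igt_post_hang_ring() effectively waits for all engines to recover.
 */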
static struct igt_hang_ring all_hang(void)
{
	uint32_t bbe = MI_BATCH_BUFFER_END;
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj;
	struct igt_hang_ring hang;
	unsigned engine;

	memset(&obj, 0, sizeof(obj));
	obj.handle = gem_create(fd, 4096);
	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;

	for_each_engine(fd, engine) {
		hang = igt_hang_ring(fd, engine);

		execbuf.flags = engine;
		__gem_execbuf(fd, &execbuf);

		gem_close(fd, hang.handle);
	}

	hang.handle = obj.handle;
	return hang;
}

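/*
 * The do_basic* variants exercise the plain write -> GPU copy -> compare
 * cycle through the access mode under test: basic0 copies a single
 * source into every destination, basic1 rewrites source and destination
 * on each iteration, and basicN queues all copies before checking any
 * result.  A hang may be injected by do_hang_func around the copies to
 * exercise reset recovery.
 */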
static void do_basic0(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	gem_quiescent_gpu(fd);

	buffers->mode->set_bo(buffers, buffers->src[0], 0xdeadbeef);
	for (int i = 0; i < buffers->count; i++) {
		struct igt_hang_ring hang = do_hang_func();

		do_copy_func(buffers, buffers->dst[i], buffers->src[0]);
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef);

		igt_post_hang_ring(fd, hang);
	}
}

static void do_basic1(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	gem_quiescent_gpu(fd);

	for (int i = 0; i < buffers->count; i++) {
		struct igt_hang_ring hang = do_hang_func();

		buffers->mode->set_bo(buffers, buffers->src[i], i);
		buffers->mode->set_bo(buffers, buffers->dst[i], ~i);

		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
		usleep(0); /* let someone else claim the mutex */
		buffers->mode->cmp_bo(buffers, buffers->dst[i], i);

		igt_post_hang_ring(fd, hang);
	}
}

static void do_basicN(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);

	for (int i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers, buffers->src[i], i);
		buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
	}

	hang = do_hang_func();

	for (int i = 0; i < buffers->count; i++) {
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
		usleep(0); /* let someone else claim the mutex */
	}

	for (int i = 0; i < buffers->count; i++)
		buffers->mode->cmp_bo(buffers, buffers->dst[i], i);

	igt_post_hang_ring(fd, hang);
}

static void do_overwrite_source(struct buffers *buffers,
				do_copy do_copy_func,
				do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers, buffers->src[i], i);
		buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
	for (i = 0; i < buffers->count; i++)
		buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
	igt_post_hang_ring(fd, hang);
}

static void do_overwrite_source_read(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func,
				     int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < half; i++) {
		buffers->mode->set_bo(buffers, buffers->src[i], i);
		buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
		buffers->mode->set_bo(buffers, buffers->dst[i+half], ~i);
	}
	for (i = 0; i < half; i++) {
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
		if (do_rcs)
			render_copy_bo(buffers, buffers->dst[i+half], buffers->src[i]);
		else
			blt_copy_bo(buffers, buffers->dst[i+half], buffers->src[i]);
	}
	hang = do_hang_func();
	for (i = half; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
	for (i = 0; i < half; i++) {
		buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
		buffers->mode->cmp_bo(buffers, buffers->dst[i+half], i);
	}
	igt_post_hang_ring(fd, hang);
}

static void do_overwrite_source_read_bcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 0);
}

static void do_overwrite_source_read_rcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 1);
}

static void do_overwrite_source__rev(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers, buffers->src[i], i);
		buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = 0; i < buffers->count; i++)
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], i);
	igt_post_hang_ring(fd, hang);
}

static void do_overwrite_source__one(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);
	buffers->mode->set_bo(buffers, buffers->src[0], 0);
	buffers->mode->set_bo(buffers, buffers->dst[0], ~0);
	do_copy_func(buffers, buffers->dst[0], buffers->src[0]);
	hang = do_hang_func();
	buffers->mode->set_bo(buffers, buffers->src[0], 0xdeadbeef);
	buffers->mode->cmp_bo(buffers, buffers->dst[0], 0);
	igt_post_hang_ring(fd, hang);
}

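/*
 * do_intermix interleaves render-engine and blitter copies with the copy
 * function under test on the same working set, so that consecutive
 * accesses to a buffer ping-pong between engines; do_rcs selects render
 * only (1), blitter only (0), or alternating between the two (-1).
 */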
static void do_intermix(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func,
			int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef^~i);
		buffers->mode->set_bo(buffers, buffers->dst[i], i);
	}
	for (i = 0; i < half; i++) {
		if (do_rcs == 1 || (do_rcs == -1 && i & 1))
			render_copy_bo(buffers, buffers->dst[i], buffers->src[i]);
		else
			blt_copy_bo(buffers, buffers->dst[i], buffers->src[i]);

		do_copy_func(buffers, buffers->dst[i+half], buffers->src[i]);

		if (do_rcs == 1 || (do_rcs == -1 && (i & 1) == 0))
			render_copy_bo(buffers, buffers->dst[i], buffers->dst[i+half]);
		else
			blt_copy_bo(buffers, buffers->dst[i], buffers->dst[i+half]);

		do_copy_func(buffers, buffers->dst[i+half], buffers->src[i+half]);
	}
	hang = do_hang_func();
	for (i = 0; i < 2*half; i++)
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef^~i);
	igt_post_hang_ring(fd, hang);
}

static void do_intermix_rcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 1);
}

static void do_intermix_bcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 0);
}

static void do_intermix_both(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, -1);
}

static void do_early_read(struct buffers *buffers,
			  do_copy do_copy_func,
			  do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef);
	igt_post_hang_ring(fd, hang);
}

static void do_read_read_bcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
		blt_copy_bo(buffers, buffers->spare, buffers->src[i]);
	}
	buffers->mode->cmp_bo(buffers, buffers->spare, 0xdeadbeef^(buffers->count-1));
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
	igt_post_hang_ring(fd, hang);
}

static void do_write_read_bcs(struct buffers *buffers,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
	for (i = 0; i < buffers->count; i++) {
		blt_copy_bo(buffers, buffers->spare, buffers->src[i]);
		do_copy_func(buffers, buffers->dst[i], buffers->spare);
	}
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
	igt_post_hang_ring(fd, hang);
}

static void do_read_read_rcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
		render_copy_bo(buffers, buffers->spare, buffers->src[i]);
	}
	buffers->mode->cmp_bo(buffers, buffers->spare, 0xdeadbeef^(buffers->count-1));
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
	igt_post_hang_ring(fd, hang);
}

static void do_write_read_rcs(struct buffers *buffers,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xdeadbeef ^ i);
	for (i = 0; i < buffers->count; i++) {
		render_copy_bo(buffers, buffers->spare, buffers->src[i]);
		do_copy_func(buffers, buffers->dst[i], buffers->spare);
	}
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef ^ i);
	igt_post_hang_ring(fd, hang);
}

static void do_gpu_read_after_write(struct buffers *buffers,
				    do_copy do_copy_func,
				    do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers, buffers->src[i], 0xabcdabcd);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers, buffers->dst[i], buffers->src[i]);
	for (i = buffers->count; i--; )
		do_copy_func(buffers, buffers->spare, buffers->dst[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xabcdabcd);
	igt_post_hang_ring(fd, hang);
}

Chris Wilson99b5ee82015-01-22 10:03:45 +00001272typedef void (*do_test)(struct buffers *buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001273 do_copy do_copy_func,
1274 do_hang do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +02001275
Chris Wilson99b5ee82015-01-22 10:03:45 +00001276typedef void (*run_wrap)(struct buffers *buffers,
Chris Wilson59c55622014-08-29 13:11:37 +01001277 do_test do_test_func,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001278 do_copy do_copy_func,
1279 do_hang do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +02001280
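/* Execute the test body exactly once and check for dropped interrupts. */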
Chris Wilson99b5ee82015-01-22 10:03:45 +00001281static void run_single(struct buffers *buffers,
Chris Wilson59c55622014-08-29 13:11:37 +01001282 do_test do_test_func,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001283 do_copy do_copy_func,
1284 do_hang do_hang_func)
Daniel Vetterec283d62013-08-14 15:18:37 +02001285{
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001286 pass = 0;
Chris Wilson99b5ee82015-01-22 10:03:45 +00001287 do_test_func(buffers, do_copy_func, do_hang_func);
Chris Wilson5b675f72016-01-22 17:33:40 +00001288 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
Daniel Vetterec283d62013-08-14 15:18:37 +02001289}
1290
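/*
 * Repeat the test body while injecting signals, exercising the
 * interruptible (ioctl restart) paths.
 */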
Chris Wilson99b5ee82015-01-22 10:03:45 +00001291static void run_interruptible(struct buffers *buffers,
Chris Wilson59c55622014-08-29 13:11:37 +01001292 do_test do_test_func,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001293 do_copy do_copy_func,
1294 do_hang do_hang_func)
Daniel Vetterec283d62013-08-14 15:18:37 +02001295{
Chris Wilson1c61c0f2016-01-08 10:51:09 +00001296 pass = 0;
Daniel Vetterd7050f92016-05-11 17:06:28 +02001297 igt_while_interruptible(true)
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001298 do_test_func(buffers, do_copy_func, do_hang_func);
Chris Wilson5b675f72016-01-22 17:33:40 +00001299 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
Daniel Vetterec283d62013-08-14 15:18:37 +02001300}
1301
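/*
 * Run the test body once in a forked child, so the buffer objects are
 * inherited across fork() while execution happens in a fresh process.
 */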
Chris Wilson46456302016-01-22 19:29:07 +00001302static void run_child(struct buffers *buffers,
1303 do_test do_test_func,
1304 do_copy do_copy_func,
1305 do_hang do_hang_func)
1307{
Chris Wilson69ecede2016-01-22 22:14:33 +00001308 /* We inherit the buffers from the parent, but the bufmgr/batch
1309	 * needs to be local as the cache of reusable buffers itself will be COWed,
1310 * leading to the child closing an object without the parent knowing.
1311 */
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001312 pass = 0;
Chris Wilsona1b47ef2016-01-27 19:44:16 +00001313 igt_fork(child, 1)
Chris Wilson46456302016-01-22 19:29:07 +00001314 do_test_func(buffers, do_copy_func, do_hang_func);
Chris Wilson46456302016-01-22 19:29:07 +00001315 igt_waitchildren();
1316 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
1317}
1318
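/*
 * Fork num_children processes; each child opens its own device fd, takes
 * a share of the buffer count, recreates the buffers locally and runs the
 * test body for the requested number of passes, optionally with signal
 * injection.
 */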
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001319static void __run_forked(struct buffers *buffers,
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001320 int num_children, int loops, bool interrupt,
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001321 do_test do_test_func,
1322 do_copy do_copy_func,
1323 do_hang do_hang_func)
Daniel Vetterec283d62013-08-14 15:18:37 +02001325{
Chris Wilsonc46f3c32016-04-10 20:44:58 +01001326	/* purge the libdrm caches before cloning the process */
1327 buffers_reset(buffers, true);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001328
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001329 igt_fork(child, num_children) {
Chris Wilson459ff6b2016-04-20 07:49:02 +01001330 int num_buffers;
1331
Daniel Vettercd1f2202013-08-29 10:06:51 +02001332 /* recreate process local variables */
Micah Fedkec81d2932015-07-22 21:54:02 +00001333 fd = drm_open_driver(DRIVER_INTEL);
Chris Wilsonf2a045f2015-01-02 16:33:33 +05301334
Chris Wilson459ff6b2016-04-20 07:49:02 +01001335 num_buffers = buffers->num_buffers / num_children;
1336 num_buffers += MIN_BUFFERS;
1337 if (num_buffers < buffers->num_buffers)
1338 buffers->num_buffers = num_buffers;
Chris Wilsonc46f3c32016-04-10 20:44:58 +01001339
1340 buffers_reset(buffers, true);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001341 buffers_create(buffers);
Daniel Vettercd1f2202013-08-29 10:06:51 +02001342
Daniel Vetterd7050f92016-05-11 17:06:28 +02001343 igt_while_interruptible(interrupt) {
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001344 for (pass = 0; pass < loops; pass++)
1345 do_test_func(buffers,
1346 do_copy_func,
1347 do_hang_func);
1348 }
1349 }
Daniel Vettercd1f2202013-08-29 10:06:51 +02001350 igt_waitchildren();
Chris Wilson5b675f72016-01-22 17:33:40 +00001351 igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
Daniel Vetterec283d62013-08-14 15:18:37 +02001352}
Daniel Vetter5a598c92013-08-14 15:08:05 +02001353
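/* One child per online CPU, each performing ncpus passes, no signals. */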
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001354static void run_forked(struct buffers *buffers,
1355 do_test do_test_func,
1356 do_copy do_copy_func,
1357 do_hang do_hang_func)
1358{
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001359 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1360 __run_forked(buffers, ncpus, ncpus, false,
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001361 do_test_func, do_copy_func, do_hang_func);
1362}
1363
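/* Oversubscribe: eight children per CPU, two passes each, with signals. */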
1364static void run_bomb(struct buffers *buffers,
1365 do_test do_test_func,
1366 do_copy do_copy_func,
1367 do_hang do_hang_func)
1368{
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001369 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
1370 __run_forked(buffers, 8*ncpus, 2, true,
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001371 do_test_func, do_copy_func, do_hang_func);
1372}
1373
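/*
 * Per-pipeline requirements: the cpu paths go through bit17_require(),
 * and the render pipeline needs a rendercopy implementation for this GPU
 * generation; the gtt and blt paths need nothing extra here.
 */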
Chris Wilsonf2a045f2015-01-02 16:33:33 +05301374static void cpu_require(void)
1375{
1376 bit17_require();
1377}
1378
1379static void gtt_require(void)
1380{
1381}
1382
Chris Wilson08188752014-09-03 13:38:30 +01001383static void bcs_require(void)
1384{
1385}
1386
1387static void rcs_require(void)
1388{
1389 igt_require(rendercopy);
1390}
1391
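/*
 * Expand one (create, mode, size, count) combination into subtests: every
 * copy pipeline crossed with every hang mode, named
 * <prefix>-<mode>-<pipeline>-<test><suffix><hang-suffix>. Unless the "all"
 * binary is used, only the blt and render pipelines (and no hang variants)
 * are exercised, and the hang detector is armed for the non-hanging runs.
 */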
Daniel Vetter5a598c92013-08-14 15:08:05 +02001392static void
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001393run_mode(const char *prefix,
1394 const struct create *create,
1395 const struct access_mode *mode,
1396 const struct size *size,
1397 const int num_buffers,
1398 const char *suffix,
1399 run_wrap run_wrap_func)
Daniel Vetter5a598c92013-08-14 15:08:05 +02001400{
Chris Wilsonf2a045f2015-01-02 16:33:33 +05301401 const struct {
Chris Wilson59c55622014-08-29 13:11:37 +01001402 const char *prefix;
1403 do_copy copy;
Chris Wilson08188752014-09-03 13:38:30 +01001404 void (*require)(void);
Chris Wilson59c55622014-08-29 13:11:37 +01001405 } pipelines[] = {
Chris Wilsonf2a045f2015-01-02 16:33:33 +05301406 { "cpu", cpu_copy_bo, cpu_require },
1407 { "gtt", gtt_copy_bo, gtt_require },
1408 { "wc", wc_copy_bo, wc_require },
Daniel Vetter3e9b4e32015-02-06 23:10:26 +01001409 { "blt", blt_copy_bo, bcs_require },
1410 { "render", render_copy_bo, rcs_require },
Chris Wilson59c55622014-08-29 13:11:37 +01001411 { NULL, NULL }
Chris Wilson77633492015-03-26 08:11:43 +00001412 }, *pskip = pipelines + 3, *p;
Chris Wilson16bafdf2014-09-04 09:26:24 +01001413 const struct {
1414 const char *suffix;
1415 do_hang hang;
Chris Wilson16bafdf2014-09-04 09:26:24 +01001416 } hangs[] = {
Chris Wilson92caf132015-12-16 09:23:56 +00001417 { "", no_hang },
1418 { "-hang-blt", bcs_hang },
1419 { "-hang-render", rcs_hang },
Chris Wilsonfcb4cfa2016-03-20 11:38:26 +00001420 { "-hang-all", all_hang },
Chris Wilson16bafdf2014-09-04 09:26:24 +01001421 { NULL, NULL },
1422 }, *h;
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001423 struct buffers buffers;
1424
1425 igt_fixture
1426 buffers_init(&buffers, prefix, create, mode,
1427 size, num_buffers,
1428 fd, run_wrap_func != run_child);
Daniel Vetter5a598c92013-08-14 15:08:05 +02001429
Chris Wilson16bafdf2014-09-04 09:26:24 +01001430 for (h = hangs; h->suffix; h++) {
Chris Wilson77633492015-03-26 08:11:43 +00001431 if (!all && *h->suffix)
1432 continue;
1433
Chris Wilson6867b872016-03-24 07:57:30 +00001434 if (!*h->suffix)
1435 igt_fork_hang_detector(fd);
1436
Chris Wilson77633492015-03-26 08:11:43 +00001437 for (p = all ? pipelines : pskip; p->prefix; p++) {
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001438 igt_fixture p->require();
Chris Wilson16bafdf2014-09-04 09:26:24 +01001439
Chris Wilson8bf09f32015-12-17 09:16:42 +00001440 igt_subtest_f("%s-%s-%s-sanitycheck0%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001441 buffers_create(&buffers);
Chris Wilson8bf09f32015-12-17 09:16:42 +00001442 run_wrap_func(&buffers, do_basic0,
1443 p->copy, h->hang);
1444 }
1445
1446 igt_subtest_f("%s-%s-%s-sanitycheck1%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001447 buffers_create(&buffers);
Chris Wilson8bf09f32015-12-17 09:16:42 +00001448 run_wrap_func(&buffers, do_basic1,
1449 p->copy, h->hang);
1450 }
1451
1452 igt_subtest_f("%s-%s-%s-sanitycheckN%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001453 buffers_create(&buffers);
Chris Wilson8bf09f32015-12-17 09:16:42 +00001454 run_wrap_func(&buffers, do_basicN,
Chris Wilson197db862015-12-09 20:54:10 +00001455 p->copy, h->hang);
1456 }
1457
Chris Wilson16bafdf2014-09-04 09:26:24 +01001458 /* try to overwrite the source values */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001459 igt_subtest_f("%s-%s-%s-overwrite-source-one%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001460 buffers_create(&buffers);
Chris Wilson99b5ee82015-01-22 10:03:45 +00001461 run_wrap_func(&buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001462 do_overwrite_source__one,
1463 p->copy, h->hang);
1464 }
1465
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001466 igt_subtest_f("%s-%s-%s-overwrite-source%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001467 buffers_create(&buffers);
Chris Wilson99b5ee82015-01-22 10:03:45 +00001468 run_wrap_func(&buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001469 do_overwrite_source,
1470 p->copy, h->hang);
1471 }
Chris Wilsona1726762015-03-16 16:29:57 +00001472
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001473 igt_subtest_f("%s-%s-%s-overwrite-source-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001474 buffers_create(&buffers);
Chris Wilsona1726762015-03-16 16:29:57 +00001475 run_wrap_func(&buffers,
1476 do_overwrite_source_read_bcs,
1477 p->copy, h->hang);
1478 }
1479
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001480 igt_subtest_f("%s-%s-%s-overwrite-source-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilsona1726762015-03-16 16:29:57 +00001481 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001482 buffers_create(&buffers);
Chris Wilsona1726762015-03-16 16:29:57 +00001483 run_wrap_func(&buffers,
1484 do_overwrite_source_read_rcs,
1485 p->copy, h->hang);
1486 }
1487
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001488 igt_subtest_f("%s-%s-%s-overwrite-source-rev%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001489 buffers_create(&buffers);
Chris Wilson99b5ee82015-01-22 10:03:45 +00001490 run_wrap_func(&buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001491 do_overwrite_source__rev,
1492 p->copy, h->hang);
1493 }
1494
Chris Wilsona72d4052015-03-18 14:15:22 +00001495			/* try to intermix copies with GPU copies */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001496 igt_subtest_f("%s-%s-%s-intermix-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilsona72d4052015-03-18 14:15:22 +00001497 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001498 buffers_create(&buffers);
Chris Wilsona72d4052015-03-18 14:15:22 +00001499 run_wrap_func(&buffers,
1500 do_intermix_rcs,
1501 p->copy, h->hang);
1502 }
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001503 igt_subtest_f("%s-%s-%s-intermix-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilsona72d4052015-03-18 14:15:22 +00001504 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001505 buffers_create(&buffers);
Chris Wilsona72d4052015-03-18 14:15:22 +00001506 run_wrap_func(&buffers,
1507 do_intermix_bcs,
1508 p->copy, h->hang);
1509 }
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001510 igt_subtest_f("%s-%s-%s-intermix-both%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilsona72d4052015-03-18 14:15:22 +00001511 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001512 buffers_create(&buffers);
Chris Wilsona72d4052015-03-18 14:15:22 +00001513 run_wrap_func(&buffers,
1514 do_intermix_both,
1515 p->copy, h->hang);
1516 }
1517
Chris Wilson16bafdf2014-09-04 09:26:24 +01001518 /* try to read the results before the copy completes */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001519 igt_subtest_f("%s-%s-%s-early-read%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001520 buffers_create(&buffers);
Chris Wilson99b5ee82015-01-22 10:03:45 +00001521 run_wrap_func(&buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001522 do_early_read,
1523 p->copy, h->hang);
1524 }
1525
Chris Wilson35b0ac92015-03-16 11:55:46 +00001526 /* concurrent reads */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001527 igt_subtest_f("%s-%s-%s-read-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001528 buffers_create(&buffers);
Chris Wilson35b0ac92015-03-16 11:55:46 +00001529 run_wrap_func(&buffers,
1530 do_read_read_bcs,
1531 p->copy, h->hang);
1532 }
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001533 igt_subtest_f("%s-%s-%s-read-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson35b0ac92015-03-16 11:55:46 +00001534 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001535 buffers_create(&buffers);
Chris Wilson35b0ac92015-03-16 11:55:46 +00001536 run_wrap_func(&buffers,
1537 do_read_read_rcs,
1538 p->copy, h->hang);
1539 }
1540
Chris Wilson0c266522015-11-11 16:37:16 +00001541 /* split copying between rings */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001542 igt_subtest_f("%s-%s-%s-write-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001543 buffers_create(&buffers);
Chris Wilson0c266522015-11-11 16:37:16 +00001544 run_wrap_func(&buffers,
1545 do_write_read_bcs,
1546 p->copy, h->hang);
1547 }
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001548 igt_subtest_f("%s-%s-%s-write-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson0c266522015-11-11 16:37:16 +00001549 igt_require(rendercopy);
Chris Wilson094e0cb2016-03-01 13:22:03 +00001550 buffers_create(&buffers);
Chris Wilson0c266522015-11-11 16:37:16 +00001551 run_wrap_func(&buffers,
1552 do_write_read_rcs,
1553 p->copy, h->hang);
1554 }
1555
Chris Wilson16bafdf2014-09-04 09:26:24 +01001556			/* and finally try to trick the kernel into losing the pending write */
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001557 igt_subtest_f("%s-%s-%s-gpu-read-after-write%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
Chris Wilson094e0cb2016-03-01 13:22:03 +00001558 buffers_create(&buffers);
Chris Wilson99b5ee82015-01-22 10:03:45 +00001559 run_wrap_func(&buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +01001560 do_gpu_read_after_write,
1561 p->copy, h->hang);
1562 }
Chris Wilson08188752014-09-03 13:38:30 +01001563 }
Chris Wilson6867b872016-03-24 07:57:30 +00001564
1565 if (!*h->suffix)
1566 igt_stop_hang_detector();
Chris Wilson59c55622014-08-29 13:11:37 +01001567 }
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001568
1569 igt_fixture
1570 buffers_fini(&buffers);
Daniel Vetter5a598c92013-08-14 15:08:05 +02001571}
Daniel Vetter43779e32013-08-14 14:50:50 +02001572
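/*
 * Walk every access mode and every execution wrapper (single, child,
 * forked, interruptible, bomb) for the given create method, buffer size
 * and buffer count, checking the mode's requirements once per group.
 */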
1573static void
Chris Wilson094e0cb2016-03-01 13:22:03 +00001574run_modes(const char *style,
Chris Wilson4eba8e22016-03-18 10:44:31 +00001575 const struct create *create,
Chris Wilson094e0cb2016-03-01 13:22:03 +00001576 const struct access_mode *mode,
Chris Wilson5d669bf2016-03-18 14:44:53 +00001577 const struct size *size,
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001578 const int num)
Daniel Vetter43779e32013-08-14 14:50:50 +02001579{
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001580 const struct wrap {
1581 const char *suffix;
1582 run_wrap func;
1583 } wrappers[] = {
1584 { "", run_single },
1585 { "-child", run_child },
1586 { "-forked", run_forked },
1587 { "-interruptible", run_interruptible },
1588 { "-bomb", run_bomb },
1589 { NULL },
1590 };
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001591
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001592 while (mode->name) {
1593 igt_subtest_group {
1594 igt_fixture {
1595 if (mode->require)
Chris Wilsone85613b2016-03-19 14:01:38 +00001596 mode->require(create, num);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001597 }
1598
1599 for (const struct wrap *w = wrappers; w->suffix; w++) {
1600 run_mode(style, create, mode, size, num,
1601 w->suffix, w->func);
1602 }
Daniel Vetter96650732016-03-18 21:55:00 +01001603 }
1604
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001605 mode++;
Daniel Vetter96650732016-03-18 21:55:00 +01001606 }
Daniel Vetter43779e32013-08-14 14:50:50 +02001607}
1608
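/*
 * Work out how many buffers of the given size (4 bytes per pixel,
 * presumably 32bpp surfaces, so size = 4 * width * height) are needed to
 * cover max bytes with src/dst pairs, plus the minimum head-room, and
 * check the system can supply that many mappings and that much memory.
 */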
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001609static unsigned
Chris Wilsonf338e982016-03-19 13:10:17 +00001610num_buffers(uint64_t max,
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001611 const struct size *s,
1612 const struct create *c,
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001613 unsigned allow_mem)
1614{
1615 unsigned size = 4*s->width*s->height;
Chris Wilson9e7e7c32016-04-11 09:17:33 +01001616 uint64_t n;
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001617
Chris Wilson9e7e7c32016-04-11 09:17:33 +01001618 igt_assert(size);
1619 n = max / (2*size);
1620 n += MIN_BUFFERS;
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001621
Chris Wilson9e7e7c32016-04-11 09:17:33 +01001622 igt_require(n < INT32_MAX);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001623 igt_require(set_max_map_count(2*n));
1624
Chris Wilsone85613b2016-03-19 14:01:38 +00001625 if (c->require)
1626 c->require(c, n);
1627
Chris Wilson6867b872016-03-24 07:57:30 +00001628 intel_require_memory(2*n, size, allow_mem);
1629
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001630 return n;
1631}
1632
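/*
 * Raise RLIMIT_NOFILE towards the system-wide fs.file-max so the heavily
 * forked subtests do not run out of file descriptors.
 */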
Chris Wilson3d8af562016-03-20 10:49:54 +00001633static bool allow_unlimited_files(void)
1634{
1635 struct rlimit rlim;
1636 unsigned nofile_rlim = 1024*1024;
1637
1638 FILE *file = fopen("/proc/sys/fs/file-max", "r");
1639 if (file) {
1640 igt_assert(fscanf(file, "%u", &nofile_rlim) == 1);
1641 igt_info("System limit for open files is %u\n", nofile_rlim);
1642 fclose(file);
1643 }
1644
1645 if (getrlimit(RLIMIT_NOFILE, &rlim))
1646 return false;
1647
1648 rlim.rlim_cur = nofile_rlim;
1649 rlim.rlim_max = nofile_rlim;
1650 return setrlimit(RLIMIT_NOFILE, &rlim) == 0;
1651}
1652
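/*
 * The tables below define the access modes (how a buffer is created,
 * written, compared and released), the buffer-creation flavours and the
 * per-buffer sizes; the fixtures then scale the buffer count from a tiny
 * sanity set up to working sets that exceed the apertures and, finally,
 * force swapping by pinning the surplus RAM.
 */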
Daniel Vetter071e9ca2013-10-31 16:23:26 +01001653igt_main
Daniel Vetter43779e32013-08-14 14:50:50 +02001654{
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001655 const struct access_mode modes[] = {
1656 {
1657 .name = "prw",
1658 .create_bo = unmapped_create_bo,
1659 .set_bo = prw_set_bo,
1660 .cmp_bo = prw_cmp_bo,
1661 .release_bo = nop_release_bo,
1662 },
1663 {
1664 .name = "partial",
1665 .create_bo = unmapped_create_bo,
1666 .set_bo = partial_set_bo,
1667 .cmp_bo = partial_cmp_bo,
1668 .release_bo = nop_release_bo,
1669 },
1670 {
1671 .name = "cpu",
1672 .create_bo = unmapped_create_bo,
1673 .require = create_cpu_require,
1674 .set_bo = cpu_set_bo,
1675 .cmp_bo = cpu_cmp_bo,
1676 .release_bo = nop_release_bo,
1677 },
1678 {
1679 .name = "snoop",
1680 .create_bo = snoop_create_bo,
1681 .require = create_snoop_require,
1682 .set_bo = cpu_set_bo,
1683 .cmp_bo = cpu_cmp_bo,
1684 .release_bo = nop_release_bo,
1685 },
1686 {
1687 .name = "userptr",
1688 .create_bo = userptr_create_bo,
1689 .require = create_userptr_require,
1690 .set_bo = userptr_set_bo,
1691 .cmp_bo = userptr_cmp_bo,
1692 .release_bo = userptr_release_bo,
1693 },
1694 {
1695 .name = "dmabuf",
1696 .create_bo = dmabuf_create_bo,
1697 .require = create_dmabuf_require,
1698 .set_bo = dmabuf_set_bo,
1699 .cmp_bo = dmabuf_cmp_bo,
1700 .release_bo = dmabuf_release_bo,
1701 },
1702 {
1703 .name = "gtt",
1704 .create_bo = gtt_create_bo,
1705 .set_bo = gtt_set_bo,
1706 .cmp_bo = gtt_cmp_bo,
1707 .release_bo = nop_release_bo,
1708 },
1709 {
1710 .name = "gttX",
1711 .create_bo = gttX_create_bo,
1712 .set_bo = gtt_set_bo,
1713 .cmp_bo = gtt_cmp_bo,
1714 .release_bo = nop_release_bo,
1715 },
1716 {
1717 .name = "wc",
1718 .require = wc_create_require,
1719 .create_bo = wc_create_bo,
1720 .set_bo = gtt_set_bo,
1721 .cmp_bo = gtt_cmp_bo,
1722 .release_bo = wc_release_bo,
1723 },
1724 {
1725 .name = "gpu",
1726 .create_bo = gpu_create_bo,
1727 .set_bo = gpu_set_bo,
1728 .cmp_bo = gpu_cmp_bo,
1729 .release_bo = nop_release_bo,
1730 },
1731 {
1732 .name = "gpuX",
1733 .create_bo = gpuX_create_bo,
1734 .set_bo = gpu_set_bo,
1735 .cmp_bo = gpu_cmp_bo,
1736 .release_bo = nop_release_bo,
1737 },
1738 { NULL },
1739 };
Chris Wilson4eba8e22016-03-18 10:44:31 +00001740 const struct create create[] = {
1741 { "", can_create_normal, create_normal_bo},
1742#if HAVE_CREATE_PRIVATE
1743 { "private-", can_create_private, create_private_bo},
1744#endif
1745#if HAVE_CREATE_STOLEN
1746 { "stolen-", can_create_stolen, create_stolen_bo},
1747#endif
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001748 { NULL, NULL }
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001749 };
Chris Wilson5d669bf2016-03-18 14:44:53 +00001750 const struct size sizes[] = {
1751 { "4KiB", 128, 8 },
1752 { "256KiB", 128, 128 },
1753 { "1MiB", 512, 512 },
1754 { "16MiB", 2048, 2048 },
1755		{ NULL }
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001756 };
Chris Wilson42291f22016-01-07 11:19:26 +00001757 uint64_t pin_sz = 0;
1758 void *pinned = NULL;
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001759 char name[80];
1760 int count = 0;
Daniel Vetter43779e32013-08-14 14:50:50 +02001761
Daniel Vetter43779e32013-08-14 14:50:50 +02001762 igt_skip_on_simulation();
1763
Chris Wilson77633492015-03-26 08:11:43 +00001764 if (strstr(igt_test_name(), "all"))
1765 all = true;
1766
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001767 igt_fixture {
Chris Wilson98dcf2f2016-03-25 00:50:45 +00001768 allow_unlimited_files();
Chris Wilson3d8af562016-03-20 10:49:54 +00001769
Micah Fedkec81d2932015-07-22 21:54:02 +00001770 fd = drm_open_driver(DRIVER_INTEL);
Chris Wilson5b675f72016-01-22 17:33:40 +00001771 intel_detect_and_clear_missed_interrupts(fd);
Chris Wilson6c428a62014-08-29 13:11:37 +01001772 devid = intel_get_drm_devid(fd);
1773 gen = intel_gen(devid);
Chris Wilson59c55622014-08-29 13:11:37 +01001774 rendercopy = igt_get_render_copyfunc(devid);
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001775 }
Daniel Vetter43779e32013-08-14 14:50:50 +02001776
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001777 for (const struct create *c = create; c->name; c++) {
1778 for (const struct size *s = sizes; s->name; s++) {
1779 /* Minimum test set */
Chris Wilsonf338e982016-03-19 13:10:17 +00001780 snprintf(name, sizeof(name), "%s%s-%s",
1781 c->name, s->name, "tiny");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001782 igt_subtest_group {
1783 igt_fixture {
Chris Wilsonf338e982016-03-19 13:10:17 +00001784 count = num_buffers(0, s, c, CHECK_RAM);
Chris Wilson5d669bf2016-03-18 14:44:53 +00001785 }
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001786 run_modes(name, c, modes, s, count);
Chris Wilson5d669bf2016-03-18 14:44:53 +00001787 }
1788
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001789 /* "Average" test set */
Chris Wilsonf338e982016-03-19 13:10:17 +00001790 snprintf(name, sizeof(name), "%s%s-%s",
1791 c->name, s->name, "small");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001792 igt_subtest_group {
1793 igt_fixture {
Chris Wilsonf338e982016-03-19 13:10:17 +00001794 count = num_buffers(gem_mappable_aperture_size()/4,
1795 s, c, CHECK_RAM);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001796 }
1797 run_modes(name, c, modes, s, count);
1798 }
Chris Wilson5d669bf2016-03-18 14:44:53 +00001799
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001800 /* Use the entire mappable aperture */
Chris Wilsonf338e982016-03-19 13:10:17 +00001801 snprintf(name, sizeof(name), "%s%s-%s",
1802 c->name, s->name, "thrash");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001803 igt_subtest_group {
1804 igt_fixture {
Chris Wilsonf338e982016-03-19 13:10:17 +00001805 count = num_buffers(gem_mappable_aperture_size(),
1806 s, c, CHECK_RAM);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001807 }
1808 run_modes(name, c, modes, s, count);
1809 }
1810
1811 /* Use the entire global GTT */
Chris Wilsonf338e982016-03-19 13:10:17 +00001812 snprintf(name, sizeof(name), "%s%s-%s",
1813 c->name, s->name, "global");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001814 igt_subtest_group {
1815 igt_fixture {
Chris Wilsonf338e982016-03-19 13:10:17 +00001816 count = num_buffers(gem_global_aperture_size(fd),
1817 s, c, CHECK_RAM);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001818 }
1819 run_modes(name, c, modes, s, count);
1820 }
1821
1822 /* Use the entire per-process GTT */
Chris Wilsonf338e982016-03-19 13:10:17 +00001823 snprintf(name, sizeof(name), "%s%s-%s",
1824 c->name, s->name, "full");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001825 igt_subtest_group {
1826 igt_fixture {
Chris Wilsonf338e982016-03-19 13:10:17 +00001827 count = num_buffers(gem_aperture_size(fd),
1828 s, c, CHECK_RAM);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001829 }
1830 run_modes(name, c, modes, s, count);
1831 }
1832
1833 /* Use the entire mappable aperture, force swapping */
Chris Wilsonf338e982016-03-19 13:10:17 +00001834 snprintf(name, sizeof(name), "%s%s-%s",
1835 c->name, s->name, "swap");
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001836 igt_subtest_group {
1837 igt_fixture {
1838 if (intel_get_avail_ram_mb() > gem_mappable_aperture_size()/(1024*1024)) {
1839 pin_sz = intel_get_avail_ram_mb() - gem_mappable_aperture_size()/(1024*1024);
1840
1841 igt_debug("Pinning %lld MiB\n", (long long)pin_sz);
1842 pin_sz *= 1024 * 1024;
1843
1844 if (posix_memalign(&pinned, 4096, pin_sz) ||
1845 mlock(pinned, pin_sz) ||
1846 madvise(pinned, pin_sz, MADV_DONTFORK)) {
1847 free(pinned);
1848 pinned = NULL;
1849 }
1850 igt_require(pinned);
1851 }
1852
Chris Wilsonf338e982016-03-19 13:10:17 +00001853 count = num_buffers(gem_mappable_aperture_size(),
1854 s, c, CHECK_RAM | CHECK_SWAP);
Chris Wilsonc2248ef2016-03-19 13:10:17 +00001855 }
1856 run_modes(name, c, modes, s, count);
1857
1858 igt_fixture {
1859 if (pinned) {
1860 munlock(pinned, pin_sz);
1861 free(pinned);
1862 pinned = NULL;
1863 }
Chris Wilson5d669bf2016-03-18 14:44:53 +00001864 }
Chris Wilson42291f22016-01-07 11:19:26 +00001865 }
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001866 }
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001867 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001868}