blob: 18206c6e68d1210091fbdad031da67d879120215 [file] [log] [blame]
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001/*
2 * Copyright © 2009,2012,2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 *
28 */
29
Chris Wilson77633492015-03-26 08:11:43 +000030/** @file gem_concurrent.c
Daniel Vetter3dba47e2013-08-06 22:27:37 +020031 *
Chris Wilson77633492015-03-26 08:11:43 +000032 * This is a test of pread/pwrite/mmap behavior when writing to active
Daniel Vetter3dba47e2013-08-06 22:27:37 +020033 * buffers.
34 *
35 * Based on gem_gtt_concurrent_blt.
36 */
37
Thomas Wood804e11f2015-08-17 17:57:43 +010038#include "igt.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020039#include <stdlib.h>
40#include <stdio.h>
41#include <string.h>
Daniel Vetter3dba47e2013-08-06 22:27:37 +020042#include <fcntl.h>
43#include <inttypes.h>
44#include <errno.h>
45#include <sys/stat.h>
46#include <sys/time.h>
Chris Wilson99431a42013-08-14 11:03:34 +010047#include <sys/wait.h>
Daniel Vetterf5daeec2014-03-23 13:35:09 +010048
49#include <drm.h>
50
Daniel Vetter3dba47e2013-08-06 22:27:37 +020051#include "intel_bufmgr.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020052
IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
		     " buffers.");

/* Local copies of the userptr ioctl definitions so the test builds even
 * against kernel headers that predate I915_GEM_USERPTR.  Note that the
 * ioctl number encodes sizeof(struct local_i915_gem_userptr). */
#define LOCAL_I915_GEM_USERPTR 0x33
#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
struct local_i915_gem_userptr {
	uint64_t user_ptr;	/* userspace address of the backing store */
	uint64_t user_size;	/* size in bytes, page aligned */
	uint32_t flags;
	uint32_t handle;	/* out: GEM handle for the new object */
};

int fd, devid, gen;		/* DRM fd, PCI device id, GPU generation */
struct intel_batchbuffer *batch;
int all;			/* run the full (expensive) set of subtests */
int pass;			/* iteration counter; offsets the pixel() probe column */

#define MIN_BUFFERS 3
71
Daniel Vetter3dba47e2013-08-06 22:27:37 +020072static void
Chris Wilsonf2a045f2015-01-02 16:33:33 +053073nop_release_bo(drm_intel_bo *bo)
74{
75 drm_intel_bo_unreference(bo);
76}
77
78static void
Daniel Vetter43779e32013-08-14 14:50:50 +020079prw_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +020080{
Chris Wilson0b4c33f2014-01-26 14:36:32 +000081 int size = width * height, i;
82 uint32_t *tmp;
Daniel Vetter3dba47e2013-08-06 22:27:37 +020083
Chris Wilson0b4c33f2014-01-26 14:36:32 +000084 tmp = malloc(4*size);
85 if (tmp) {
86 for (i = 0; i < size; i++)
87 tmp[i] = val;
88 drm_intel_bo_subdata(bo, 0, 4*size, tmp);
89 free(tmp);
90 } else {
91 for (i = 0; i < size; i++)
92 drm_intel_bo_subdata(bo, 4*i, 4, &val);
93 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +020094}
95
/* Verify every pixel of bo equals val, reading back with pread into a
 * CPU-mapped scratch bo (tmp). */
static void
prw_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	int size = width * height, i;
	uint32_t *vaddr;

	do_or_die(drm_intel_bo_map(tmp, true));
	do_or_die(drm_intel_bo_get_subdata(bo, 0, 4*size, tmp->virtual));
	vaddr = tmp->virtual;
	for (i = 0; i < size; i++)
		igt_assert_eq_u32(vaddr[i], val);
	drm_intel_bo_unmap(tmp);
}
109
/* One probe point per row; the column shifts with the global pass counter
 * so repeated runs touch different locations. */
#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))

/* Write just one pixel per row using pwrite. */
static void
partial_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	int y;

	for (y = 0; y < height; y++)
		do_or_die(drm_intel_bo_subdata(bo, 4*pixel(y, width), 4, &val));
}

/* Read back the same per-row probe pixels using pread and compare. */
static void
partial_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	int y;

	for (y = 0; y < height; y++) {
		uint32_t buf;
		do_or_die(drm_intel_bo_get_subdata(bo, 4*pixel(y, width), 4, &buf));
		igt_assert_eq_u32(buf, val);
	}
}
132
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200133static drm_intel_bo *
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000134create_normal_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200135{
136 drm_intel_bo *bo;
137
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000138 bo = drm_intel_bo_alloc(bufmgr, "bo", size, 0);
Daniel Vetter83440952013-08-13 12:35:58 +0200139 igt_assert(bo);
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200140
141 return bo;
142}
143
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000144static bool can_create_normal(void)
145{
146 return true;
147}
148
static drm_intel_bo *
create_private_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
	drm_intel_bo *bo;
	uint32_t handle;

	/* XXX gem_create_with_flags(fd, size, I915_CREATE_PRIVATE); */

	/* Placeholder: until a create-with-flags ioctl exists, this just
	 * allocates an ordinary bo and wraps it in a libdrm object. */
	handle = gem_create(fd, size);
	bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
	gem_close(fd, handle);

	return bo;
}

/* Disabled until I915_CREATE_PRIVATE exists in the kernel. */
static bool can_create_private(void)
{
	return false;
}
168
static drm_intel_bo *
create_stolen_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
{
	drm_intel_bo *bo;
	uint32_t handle;

	/* XXX gem_create_with_flags(fd, size, I915_CREATE_STOLEN); */

	/* Placeholder: allocates a regular bo until stolen-memory objects
	 * can be created from userspace. */
	handle = gem_create(fd, size);
	bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
	gem_close(fd, handle);

	return bo;
}

static bool can_create_stolen(void)
{
	/* XXX check num_buffers against available stolen */
	return false;
}

/* Active bo allocator, selected per test variant. */
static drm_intel_bo *
(*create_func)(drm_intel_bufmgr *bufmgr, uint64_t size);
192
/* CPU access (pread/pwrite/cpu-mmap) is not usable on stolen memory. */
static bool create_cpu_require(void)
{
	return create_func != create_stolen_bo;
}

/* Create a width*height 32bpp bo with no CPU mapping attached. */
static drm_intel_bo *
unmapped_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
{
	return create_func(bufmgr, (uint64_t)4*width*height);
}
203
Chris Wilson0143d4f2016-01-21 09:53:50 +0000204static bool create_snoop_require(void)
205{
206 if (!create_cpu_require())
207 return false;
208
209 return !gem_has_llc(fd);
210}
211
/* Create a bo with snooped (CPU cache coherent) caching mode. */
static drm_intel_bo *
snoop_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
{
	drm_intel_bo *bo;

	/* Snooping is pointless when the LLC already provides coherency. */
	igt_skip_on(gem_has_llc(fd));

	bo = unmapped_create_bo(bufmgr, width, height);
	gem_set_caching(fd, bo->handle, I915_CACHING_CACHED);
	/* Caching mode is per-object state; keep it out of the bo cache. */
	drm_intel_bo_disable_reuse(bo);

	return bo;
}
225
Chris Wilson0143d4f2016-01-21 09:53:50 +0000226static bool create_userptr_require(void)
227{
228 static int found = -1;
229 if (found < 0) {
230 struct drm_i915_gem_userptr arg;
231
232 found = 0;
233
234 memset(&arg, 0, sizeof(arg));
235 arg.user_ptr = -4096ULL;
236 arg.user_size = 8192;
237 errno = 0;
238 drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg);
239 if (errno == EFAULT) {
240 igt_assert(posix_memalign((void **)&arg.user_ptr,
241 4096, arg.user_size) == 0);
242 found = drmIoctl(fd,
243 LOCAL_IOCTL_I915_GEM_USERPTR,
244 &arg) == 0;
245 free((void *)(uintptr_t)arg.user_ptr);
246 }
247
248 }
249 return found;
250}
251
/* Create a userptr bo backed by freshly mmapped anonymous pages, and
 * stash the CPU mapping in bo->virtual for userptr_set/cmp_bo;
 * userptr_release_bo unmaps it again. */
static drm_intel_bo *
userptr_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
{
	struct local_i915_gem_userptr userptr;
	drm_intel_bo *bo;
	void *ptr;

	memset(&userptr, 0, sizeof(userptr));
	userptr.user_size = width * height * 4;
	/* Round up to a whole page, as required by the userptr ioctl. */
	userptr.user_size = (userptr.user_size + 4095) & -4096;

	ptr = mmap(NULL, userptr.user_size,
		   PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
	igt_assert(ptr != (void *)-1);	/* i.e. MAP_FAILED */
	userptr.user_ptr = (uintptr_t)ptr;

	do_or_die(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr));
	bo = gem_handle_to_libdrm_bo(bufmgr, fd, "userptr", userptr.handle);
	bo->virtual = (void *)(uintptr_t)userptr.user_ptr;

	return bo;
}
274
/* Fill the bo through the userptr backing pages stashed in bo->virtual. */
static void
userptr_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	int size = width * height;
	uint32_t *vaddr = bo->virtual;

	/* Serialise with the GPU before scribbling on the shared pages. */
	gem_set_domain(fd, bo->handle,
		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	while (size--)
		*vaddr++ = val;
}

/* Compare the bo contents through the userptr backing pages. */
static void
userptr_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	int size = width * height;
	uint32_t *vaddr = bo->virtual;

	/* Read-only access: wait for rendering, no write domain needed. */
	gem_set_domain(fd, bo->handle,
		       I915_GEM_DOMAIN_CPU, 0);
	while (size--)
		igt_assert_eq_u32(*vaddr++, val);
}
298
/* Release a userptr bo: unmap the anonymous pages created by
 * userptr_create_bo, then drop the libdrm reference. */
static void
userptr_release_bo(drm_intel_bo *bo)
{
	munmap(bo->virtual, bo->size);
	bo->virtual = NULL;

	drm_intel_bo_unreference(bo);
}
307
/* Fill the bo through its long-lived GTT mapping (set up by map_bo). */
static void
gtt_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	uint32_t *vaddr = bo->virtual;
	int size = width * height;

	/* Tell the kernel we are about to write through the GTT. */
	drm_intel_gem_bo_start_gtt_access(bo, true);
	while (size--)
		*vaddr++ = val;
}

/* Spot-check the bo through its GTT mapping. */
static void
gtt_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	uint32_t *vaddr = bo->virtual;
	int y;

	/* GTT access is slow. So we just compare a few points */
	drm_intel_gem_bo_start_gtt_access(bo, false);
	for (y = 0; y < height; y++)
		igt_assert_eq_u32(vaddr[pixel(y, width)], val);
}
330
/* Attach a persistent GTT mapping to the bo (bo->virtual). */
static drm_intel_bo *
map_bo(drm_intel_bo *bo)
{
	/* gtt map doesn't have a write parameter, so just keep the mapping
	 * around (to avoid the set_domain with the gtt write domain set) and
	 * manually tell the kernel when we start access the gtt. */
	do_or_die(drm_intel_gem_bo_map_gtt(bo));

	return bo;
}

/* Switch the bo to X-tiling with the natural stride for this width. */
static drm_intel_bo *
tile_bo(drm_intel_bo *bo, int width)
{
	uint32_t tiling = I915_TILING_X;
	uint32_t stride = width * 4;

	do_or_die(drm_intel_bo_set_tiling(bo, &tiling, stride));

	return bo;
}
352
353static drm_intel_bo *
354gtt_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
355{
356 return map_bo(unmapped_create_bo(bufmgr, width, height));
357}
358
359static drm_intel_bo *
360gttX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
361{
362 return tile_bo(gtt_create_bo(bufmgr, width, height), width);
363}
364
365static drm_intel_bo *
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530366wc_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
367{
368 drm_intel_bo *bo;
369
Daniel Vettera3e34ce2015-02-06 11:05:28 +0100370 gem_require_mmap_wc(fd);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530371
372 bo = unmapped_create_bo(bufmgr, width, height);
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300373 bo->virtual = __gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530374 return bo;
375}
376
377static void
378wc_release_bo(drm_intel_bo *bo)
379{
380 munmap(bo->virtual, bo->size);
381 bo->virtual = NULL;
382
383 nop_release_bo(bo);
384}
385
386static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +0100387gpu_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
388{
389 return unmapped_create_bo(bufmgr, width, height);
390}
391
Chris Wilson86055df2014-08-29 17:36:29 +0100392static drm_intel_bo *
393gpuX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
394{
395 return tile_bo(gpu_create_bo(bufmgr, width, height), width);
396}
397
Daniel Vetter43779e32013-08-14 14:50:50 +0200398static void
399cpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
400{
401 int size = width * height;
402 uint32_t *vaddr;
403
404 do_or_die(drm_intel_bo_map(bo, true));
405 vaddr = bo->virtual;
406 while (size--)
407 *vaddr++ = val;
408 drm_intel_bo_unmap(bo);
409}
410
411static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100412cpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter43779e32013-08-14 14:50:50 +0200413{
414 int size = width * height;
415 uint32_t *vaddr;
416
417 do_or_die(drm_intel_bo_map(bo, false));
418 vaddr = bo->virtual;
419 while (size--)
Chris Wilson6c428a62014-08-29 13:11:37 +0100420 igt_assert_eq_u32(*vaddr++, val);
Daniel Vetter43779e32013-08-14 14:50:50 +0200421 drm_intel_bo_unmap(bo);
422}
423
Chris Wilson6c428a62014-08-29 13:11:37 +0100424static void
425gpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
426{
427 struct drm_i915_gem_relocation_entry reloc[1];
428 struct drm_i915_gem_exec_object2 gem_exec[2];
429 struct drm_i915_gem_execbuffer2 execbuf;
430 struct drm_i915_gem_pwrite gem_pwrite;
431 struct drm_i915_gem_create create;
432 uint32_t buf[10], *b;
Chris Wilson86055df2014-08-29 17:36:29 +0100433 uint32_t tiling, swizzle;
434
435 drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
Chris Wilson6c428a62014-08-29 13:11:37 +0100436
437 memset(reloc, 0, sizeof(reloc));
438 memset(gem_exec, 0, sizeof(gem_exec));
439 memset(&execbuf, 0, sizeof(execbuf));
440
441 b = buf;
442 *b++ = XY_COLOR_BLT_CMD_NOLEN |
443 ((gen >= 8) ? 5 : 4) |
444 COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
Chris Wilson86055df2014-08-29 17:36:29 +0100445 if (gen >= 4 && tiling) {
446 b[-1] |= XY_COLOR_BLT_TILED;
447 *b = width;
448 } else
449 *b = width << 2;
450 *b++ |= 0xf0 << 16 | 1 << 25 | 1 << 24;
Chris Wilson6c428a62014-08-29 13:11:37 +0100451 *b++ = 0;
452 *b++ = height << 16 | width;
453 reloc[0].offset = (b - buf) * sizeof(uint32_t);
454 reloc[0].target_handle = bo->handle;
455 reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
456 reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
457 *b++ = 0;
458 if (gen >= 8)
459 *b++ = 0;
460 *b++ = val;
461 *b++ = MI_BATCH_BUFFER_END;
462 if ((b - buf) & 1)
463 *b++ = 0;
464
465 gem_exec[0].handle = bo->handle;
466 gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;
467
468 create.handle = 0;
469 create.size = 4096;
470 drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
471 gem_exec[1].handle = create.handle;
472 gem_exec[1].relocation_count = 1;
473 gem_exec[1].relocs_ptr = (uintptr_t)reloc;
474
475 execbuf.buffers_ptr = (uintptr_t)gem_exec;
476 execbuf.buffer_count = 2;
477 execbuf.batch_len = (b - buf) * sizeof(buf[0]);
Chris Wilson86055df2014-08-29 17:36:29 +0100478 if (gen >= 6)
479 execbuf.flags = I915_EXEC_BLT;
Chris Wilson6c428a62014-08-29 13:11:37 +0100480
481 gem_pwrite.handle = gem_exec[1].handle;
482 gem_pwrite.offset = 0;
483 gem_pwrite.size = execbuf.batch_len;
484 gem_pwrite.data_ptr = (uintptr_t)buf;
Daniel Stonede7ccdd2015-10-01 14:16:48 +0100485 do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
486 do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
Chris Wilson6c428a62014-08-29 13:11:37 +0100487
488 drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &create.handle);
489}
490
/* Verify the bo via the GPU: blit it into a scratch bo (tmp) and compare
 * that copy through a CPU map. */
static void
gpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	intel_blt_copy(batch,
		       bo, 0, 0, 4*width,
		       tmp, 0, 0, 4*width,
		       width, height, 32);
	cpu_cmp_bo(tmp, val, width, height, NULL);
}
500
/* Table of CPU/GPU access methods under test.  Each entry supplies a
 * uniform set/compare/create/release vtable; the optional .require hook
 * gates the mode at runtime. */
const struct access_mode {
	const char *name;
	bool (*require)(void);
	void (*set_bo)(drm_intel_bo *bo, uint32_t val, int w, int h);
	void (*cmp_bo)(drm_intel_bo *bo, uint32_t val, int w, int h, drm_intel_bo *tmp);
	drm_intel_bo *(*create_bo)(drm_intel_bufmgr *bufmgr, int width, int height);
	void (*release_bo)(drm_intel_bo *bo);
} access_modes[] = {
	{
		/* whole-object pread/pwrite */
		.name = "prw",
		.set_bo = prw_set_bo,
		.cmp_bo = prw_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* per-pixel pread/pwrite probes */
		.name = "partial",
		.set_bo = partial_set_bo,
		.cmp_bo = partial_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* CPU mmap */
		.name = "cpu",
		.require = create_cpu_require,
		.set_bo = cpu_set_bo,
		.cmp_bo = cpu_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* CPU mmap of a snooped (cache-coherent) bo */
		.name = "snoop",
		.require = create_snoop_require,
		.set_bo = cpu_set_bo,
		.cmp_bo = cpu_cmp_bo,
		.create_bo = snoop_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* direct access through the userptr backing store */
		.name = "userptr",
		.require = create_userptr_require,
		.set_bo = userptr_set_bo,
		.cmp_bo = userptr_cmp_bo,
		.create_bo = userptr_create_bo,
		.release_bo = userptr_release_bo,
	},
	{
		/* GTT mmap, linear */
		.name = "gtt",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gtt_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* GTT mmap, X-tiled */
		.name = "gttX",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gttX_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* write-combining mmap; gtt_set/cmp_bo work on bo->virtual */
		.name = "wc",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = wc_create_bo,
		.release_bo = wc_release_bo,
	},
	{
		/* access via the blitter, linear */
		.name = "gpu",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpu_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* access via the blitter, X-tiled */
		.name = "gpuX",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpuX_create_bo,
		.release_bo = nop_release_bo,
	},
};
583
int num_buffers;			/* working-set size, chosen at runtime */
const int width = 512, height = 512;	/* dimensions of every test bo */
igt_render_copyfunc_t rendercopy;	/* per-gen render copy, if available */

/* Working set for one access mode: paired src/dst arrays plus a dummy
 * (scratch for cmp_bo) and a spare bo. */
struct buffers {
	const struct access_mode *mode;
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo **src, **dst;
	drm_intel_bo *dummy, *spare;
	int count;	/* number of valid src/dst pairs */
};
595
/* Prepare a buffers struct for the given mode: a fresh bufmgr on _fd and
 * room for num_buffers src/dst pairs.  Returns a new batchbuffer for that
 * bufmgr; no bos are created yet (see buffers_create). */
static void *buffers_init(struct buffers *data,
			  const struct access_mode *mode,
			  int _fd)
{
	data->mode = mode;
	data->count = 0;

	data->bufmgr = drm_intel_bufmgr_gem_init(_fd, 4096);
	igt_assert(data->bufmgr);

	/* One allocation backs both arrays: dst aliases the second half. */
	data->src = malloc(2*sizeof(drm_intel_bo *)*num_buffers);
	igt_assert(data->src);
	data->dst = data->src + num_buffers;

	drm_intel_bufmgr_gem_enable_reuse(data->bufmgr);
	return intel_batchbuffer_alloc(data->bufmgr, devid);
}
613
/* Release every bo created by buffers_create; safe to call when empty. */
static void buffers_destroy(struct buffers *data)
{
	if (data->count == 0)
		return;

	for (int i = 0; i < data->count; i++) {
		data->mode->release_bo(data->src[i]);
		data->mode->release_bo(data->dst[i]);
	}
	data->mode->release_bo(data->dummy);
	data->mode->release_bo(data->spare);
	data->count = 0;
}
627
/* (Re)populate the working set with count src/dst pairs plus the dummy
 * and spare bos, using the mode's creation method. */
static void buffers_create(struct buffers *data,
			   int count)
{
	igt_assert(data->bufmgr);

	buffers_destroy(data);	/* drop any previous generation first */

	for (int i = 0; i < count; i++) {
		data->src[i] =
			data->mode->create_bo(data->bufmgr, width, height);
		data->dst[i] =
			data->mode->create_bo(data->bufmgr, width, height);
	}
	data->dummy = data->mode->create_bo(data->bufmgr, width, height);
	data->spare = data->mode->create_bo(data->bufmgr, width, height);
	data->count = count;
}
645
/* Tear down everything buffers_init/buffers_create built. */
static void buffers_fini(struct buffers *data)
{
	if (data->bufmgr == NULL)
		return;

	buffers_destroy(data);

	free(data->src);
	data->src = NULL;
	data->dst = NULL;

	/* NOTE(review): this frees the global `batch`, not the batchbuffer
	 * returned by buffers_init -- presumably callers store that return
	 * value in `batch`; verify against the callers. */
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(data->bufmgr);
	data->bufmgr = NULL;
}
661
/* A copy primitive: dst <- src, by whatever engine/path. */
typedef void (*do_copy)(drm_intel_bo *dst, drm_intel_bo *src);
/* Optionally inject a GPU hang; returns the token for igt_post_hang_ring. */
typedef struct igt_hang_ring (*do_hang)(void);

/* Copy via the render engine using the per-gen rendercopy routine. */
static void render_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	struct igt_buf d = {
		.bo = dst,
		.size = width * height * 4,
		.num_tiles = width * height * 4,
		.stride = width * 4,
	}, s = {
		.bo = src,
		.size = width * height * 4,
		.num_tiles = width * height * 4,
		.stride = width * 4,
	};
	uint32_t swizzle;

	/* Pick up the current tiling mode of each bo for the copy. */
	drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
	drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);

	rendercopy(batch, NULL,
		   &s, 0, 0,
		   width, height,
		   &d, 0, 0);
}

/* Copy via the blitter engine. */
static void blt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	intel_blt_copy(batch,
		       src, 0, 0, 4*width,
		       dst, 0, 0, 4*width,
		       width, height, 32);
}
Daniel Vetter5a598c92013-08-14 15:08:05 +0200696
/* Copy via memcpy between CPU mmaps. */
static void cpu_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = width * height * sizeof(uint32_t);
	void *d, *s;

	/* Move both objects into the CPU domain before touching the pages. */
	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
	s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
	d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}
712
/* Copy via memcpy between GTT mmaps. */
static void gtt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = width * height * sizeof(uint32_t);
	void *d, *s;

	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
	d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}
729
/* Copy via memcpy between write-combining mmaps; serialisation still goes
 * through the GTT domain. */
static void wc_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	const int size = width * height * sizeof(uint32_t);
	void *d, *s;

	gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
	gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
	d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);

	memcpy(d, s, size);

	munmap(d, size);
	munmap(s, size);
}
746
/* do_hang implementation that injects nothing. */
static struct igt_hang_ring no_hang(void)
{
	return (struct igt_hang_ring){0, 0};
}

/* Inject a hang on the blitter ring. */
static struct igt_hang_ring bcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_BLT);
}

/* Inject a hang on the render ring. */
static struct igt_hang_ring rcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_RENDER);
}
761
/* Fan-out: copy one source into every destination, validating each copy
 * immediately, optionally with a hang pending per iteration. */
static void do_basic0(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	gem_quiescent_gpu(fd);

	buffers->mode->set_bo(buffers->src[0], 0xdeadbeef, width, height);
	for (int i = 0; i < buffers->count; i++) {
		struct igt_hang_ring hang = do_hang_func();

		do_copy_func(buffers->dst[i], buffers->src[0]);
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef, width, height, buffers->dummy);

		igt_post_hang_ring(fd, hang);
	}
}
778
/* Pairwise copies, one pair at a time, each validated immediately. */
static void do_basic1(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	gem_quiescent_gpu(fd);

	for (int i = 0; i < buffers->count; i++) {
		struct igt_hang_ring hang = do_hang_func();

		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);

		do_copy_func(buffers->dst[i], buffers->src[i]);
		usleep(0); /* let someone else claim the mutex */
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);

		igt_post_hang_ring(fd, hang);
	}
}
798
/* Queue all copies first, then validate all results, with a single hang
 * optionally injected before the copies are submitted. */
static void do_basicN(struct buffers *buffers,
		      do_copy do_copy_func,
		      do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);

	for (int i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
	}

	hang = do_hang_func();

	for (int i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		usleep(0); /* let someone else claim the mutex */
	}

	for (int i = 0; i < buffers->count; i++)
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);

	igt_post_hang_ring(fd, hang);
}
824
/* Overwrite all sources after queueing the copies; the destinations must
 * still hold the values captured at copy time. */
static void do_overwrite_source(struct buffers *buffers,
				do_copy do_copy_func,
				do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	/* Clobber in reverse order to vary the retirement order. */
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < buffers->count; i++)
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
846
/* As do_overwrite_source, but each source is copied twice -- once with
 * the method under test and once via the bcs/rcs engine -- before the
 * sources are clobbered; both destination sets must survive. */
static void do_overwrite_source_read(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func,
				     int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < half; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
		buffers->mode->set_bo(buffers->dst[i+half], ~i, width, height);
	}
	for (i = 0; i < half; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		if (do_rcs)
			render_copy_bo(buffers->dst[i+half], buffers->src[i]);
		else
			blt_copy_bo(buffers->dst[i+half], buffers->src[i]);
	}
	hang = do_hang_func();
	for (i = half; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < half; i++) {
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
		buffers->mode->cmp_bo(buffers->dst[i+half], i, width, height, buffers->dummy);
	}
	igt_post_hang_ring(fd, hang);
}

/* Secondary copy through the blitter. */
static void do_overwrite_source_read_bcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 0);
}

/* Secondary copy through the render engine. */
static void do_overwrite_source_read_rcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 1);
}
892
/* As do_overwrite_source, but clobber forwards and compare in reverse to
 * exercise the opposite ordering. */
static void do_overwrite_source__rev(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = 0; i < buffers->count; i++)
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
914
/* Minimal overwrite-source case: a single copy, then clobber the one
 * source and check the one destination kept the pre-overwrite value. */
static void do_overwrite_source__one(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);
	buffers->mode->set_bo(buffers->src[0], 0, width, height);
	buffers->mode->set_bo(buffers->dst[0], ~0, width, height);
	do_copy_func(buffers->dst[0], buffers->src[0]);
	hang = do_hang_func();
	buffers->mode->set_bo(buffers->src[0], 0xdeadbeef, width, height);
	buffers->mode->cmp_bo(buffers->dst[0], 0, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
930
/* Interleave GPU copies with the copy method under test, chaining
 *   dst[i] <- src[i] (GPU), dst[i+half] <- src[i] (method),
 *   dst[i] <- dst[i+half] (GPU), dst[i+half] <- src[i+half] (method),
 * so that every dst[i] must finally hold 0xdeadbeef^~i.
 * do_rcs selects the GPU engine: 1 = render only, 0 = blitter only,
 * -1 = alternate between them per iteration. */
static void do_intermix(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func,
			int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef^~i, width, height);
		buffers->mode->set_bo(buffers->dst[i], i, width, height);
	}
	for (i = 0; i < half; i++) {
		if (do_rcs == 1 || (do_rcs == -1 && i & 1))
			render_copy_bo(buffers->dst[i], buffers->src[i]);
		else
			blt_copy_bo(buffers->dst[i], buffers->src[i]);

		do_copy_func(buffers->dst[i+half], buffers->src[i]);

		/* In alternating mode, use the opposite engine for the
		 * second hop so the chain crosses rings. */
		if (do_rcs == 1 || (do_rcs == -1 && (i & 1) == 0))
			render_copy_bo(buffers->dst[i], buffers->dst[i+half]);
		else
			blt_copy_bo(buffers->dst[i], buffers->dst[i+half]);

		do_copy_func(buffers->dst[i+half], buffers->src[i+half]);
	}
	hang = do_hang_func();
	for (i = 0; i < 2*half; i++)
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef^~i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
965
/* Intermix variant: GPU hops on the render engine only. */
static void do_intermix_rcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 1);
}
972
/* Intermix variant: GPU hops on the blitter only. */
static void do_intermix_bcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 0);
}
979
/* Intermix variant: alternate GPU hops between render and blitter. */
static void do_intermix_both(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, -1);
}
986
/* Queue all copies and read the destinations back immediately (in reverse
 * order); the reads may begin before the copies retire and must still
 * observe the copied 0xdeadbeef pattern. */
static void do_early_read(struct buffers *buffers,
			  do_copy do_copy_func,
			  do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1004
/* Issue two concurrent reads of each source: one via the copy method under
 * test into dst[i], one via the blitter into the shared spare.  The spare
 * must end up holding the last source copied, and every dst[i] its own
 * source value — i.e. the parallel readers must not corrupt each other. */
static void do_read_read_bcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		blt_copy_bo(buffers->spare, buffers->src[i]);
	}
	/* spare was last written from src[count-1]. */
	buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1025
/* Funnel every copy through the shared spare: the blitter writes src[i]
 * into spare, then the copy method under test reads spare into dst[i].
 * Each read must wait for the preceding blitter write, so dst[i] must end
 * up with src[i]'s pattern. */
static void do_write_read_bcs(struct buffers *buffers,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		blt_copy_bo(buffers->spare, buffers->src[i]);
		do_copy_func(buffers->dst[i], buffers->spare);
	}
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1045
/* Same as do_read_read_bcs, but the second concurrent reader is the
 * render engine instead of the blitter. */
static void do_read_read_rcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		render_copy_bo(buffers->spare, buffers->src[i]);
	}
	/* spare was last written from src[count-1]. */
	buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1066
/* Same as do_write_read_bcs, but the spare is filled by the render
 * engine instead of the blitter. */
static void do_write_read_rcs(struct buffers *buffers,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		render_copy_bo(buffers->spare, buffers->src[i]);
		do_copy_func(buffers->dst[i], buffers->spare);
	}
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1086
/* Fill every dst[i] from src[i], then immediately queue reads of every
 * dst (in reverse) back into the dummy buffer using the same copy method.
 * The pending writes must not be lost to the back-to-back reads: each
 * dst[i] must still compare equal to the original pattern. */
static void do_gpu_read_after_write(struct buffers *buffers,
				    do_copy do_copy_func,
				    do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xabcdabcd, width, height);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	for (i = buffers->count; i--; )
		do_copy_func(buffers->dummy, buffers->dst[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xabcdabcd, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1106
/* A single test body: operates on a pre-populated buffer set with the
 * given copy primitive and hang injector. */
typedef void (*do_test)(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func);

/* A run wrapper: executes a do_test in some execution context (single
 * process, under signal interruption, in a child, forked per CPU, ...). */
typedef void (*run_wrap)(struct buffers *buffers,
			 do_test do_test_func,
			 do_copy do_copy_func,
			 do_hang do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +02001115
/* Run the test once in this process, then assert that no GPU interrupts
 * went missing. */
static void run_single(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	do_test_func(buffers, do_copy_func, do_hang_func);
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
1124
/* Repeat the test ten times; the caller (run_modes) arranges for the
 * signal helper to be firing so syscalls are constantly interrupted.
 * 'pass' is a file-scope counter — presumably consulted by the buffer
 * helpers to vary behaviour per iteration (TODO confirm) — and is reset
 * afterwards for subsequent subtests. */
static void run_interruptible(struct buffers *buffers,
			      do_test do_test_func,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	for (pass = 0; pass < 10; pass++)
		do_test_func(buffers, do_copy_func, do_hang_func);
	pass = 0;
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
1135
/* Run the test once in a forked child that shares the parent's buffer
 * handles but builds its own bufmgr/batch. */
static void run_child(struct buffers *buffers,
		      do_test do_test_func,
		      do_copy do_copy_func,
		      do_hang do_hang_func)

{
	/* We inherit the buffers from the parent, but the bufmgr/batch
	 * needs to be local as the cache of reusable itself will be COWed,
	 * leading to the child closing an object without the parent knowing.
	 */
	igt_fork(child, 1) {
		buffers->bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		drm_intel_bufmgr_gem_enable_reuse(buffers->bufmgr);
		batch = intel_batchbuffer_alloc(buffers->bufmgr, devid);

		do_test_func(buffers, do_copy_func, do_hang_func);

		/* Tear down the child-local state before exiting. */
		intel_batchbuffer_free(batch);
		drm_intel_bufmgr_destroy(buffers->bufmgr);
	}

	igt_waitchildren();
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
1160
/* Fork num_children processes; each opens its own fd, builds its own
 * bufmgr/batch/buffer set (GEM state cannot be shared across fork) and
 * runs the test 'loops' times.  The global num_buffers is divided among
 * the children to keep the total footprint bounded, and restored on
 * return. */
static void __run_forked(struct buffers *buffers,
			 int num_children, int loops,
			 do_test do_test_func,
			 do_copy do_copy_func,
			 do_hang do_hang_func)

{
	const int old_num_buffers = num_buffers;

	num_buffers /= num_children;
	num_buffers += MIN_BUFFERS;

	igt_fork(child, num_children) {
		/* recreate process local variables */
		buffers->count = 0;
		fd = drm_open_driver(DRIVER_INTEL);

		batch = buffers_init(buffers, buffers->mode, fd);

		buffers_create(buffers, num_buffers);
		for (pass = 0; pass < loops; pass++)
			do_test_func(buffers, do_copy_func, do_hang_func);
		pass = 0;

		buffers_fini(buffers);
	}

	igt_waitchildren();
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);

	num_buffers = old_num_buffers;
}
Daniel Vetter5a598c92013-08-14 15:08:05 +02001193
/* Forked run: one child per online CPU, 10 loops each. */
static void run_forked(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	__run_forked(buffers, sysconf(_SC_NPROCESSORS_ONLN), 10,
		     do_test_func, do_copy_func, do_hang_func);
}
1202
/* Fork-bomb run: heavily oversubscribe with 8 children per online CPU. */
static void run_bomb(struct buffers *buffers,
		     do_test do_test_func,
		     do_copy do_copy_func,
		     do_hang do_hang_func)
{
	__run_forked(buffers, 8*sysconf(_SC_NPROCESSORS_ONLN), 10,
		     do_test_func, do_copy_func, do_hang_func);
}
1211
/* Skip unless the physical swizzle of an X-tiled object matches the
 * swizzle reported to userspace; when they differ (bit17 swizzling),
 * CPU access cannot be compared byte-exactly. */
static void bit17_require(void)
{
	/* Extended GET_TILING result; declared locally as the installed
	 * headers may predate the phys_swizzle_mode field. */
	struct drm_i915_gem_get_tiling2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} arg;
#define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)

	memset(&arg, 0, sizeof(arg));
	arg.handle = gem_create(fd, 4096);
	gem_set_tiling(fd, arg.handle, I915_TILING_X, 512);

	do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg);
	gem_close(fd, arg.handle);
	igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);
}
1230
/* CPU-mmap copies need swizzle-free access to tiled buffers. */
static void cpu_require(void)
{
	bit17_require();
}
1235
/* GTT-mmap copies have no extra requirements. */
static void gtt_require(void)
{
}
1239
/* WC-mmap copies need swizzle-free tiling and kernel mmap(wc) support. */
static void wc_require(void)
{
	bit17_require();
	gem_require_mmap_wc(fd);
}
1245
/* Blitter copies have no extra requirements. */
static void bcs_require(void)
{
}
1249
/* Render copies need a rendercopy implementation for this GPU generation. */
static void rcs_require(void)
{
	igt_require(rendercopy);
}
1254
/* Enumerate every subtest for one (prefix, access mode, run wrapper)
 * combination, crossed with each copy pipeline and each hang mode.
 * Unless the "all" binary is running, only the blt/render pipelines
 * (pskip) without hang injection are enumerated. */
static void
run_basic_modes(const char *prefix,
		const struct access_mode *mode,
		const char *suffix,
		run_wrap run_wrap_func)
{
	const struct {
		const char *prefix;
		do_copy copy;
		void (*require)(void);
	} pipelines[] = {
		{ "cpu", cpu_copy_bo, cpu_require },
		{ "gtt", gtt_copy_bo, gtt_require },
		{ "wc", wc_copy_bo, wc_require },
		{ "blt", blt_copy_bo, bcs_require },
		{ "render", render_copy_bo, rcs_require },
		{ NULL, NULL }
	}, *pskip = pipelines + 3, *p; /* pskip: blt/render only */
	const struct {
		const char *suffix;
		do_hang hang;
	} hangs[] = {
		{ "", no_hang },
		{ "-hang-blt", bcs_hang },
		{ "-hang-render", rcs_hang },
		{ NULL, NULL },
	}, *h;
	struct buffers buffers;

	for (h = hangs; h->suffix; h++) {
		if (!all && *h->suffix)
			continue;

		for (p = all ? pipelines : pskip; p->prefix; p++) {
			igt_fixture {
				batch = buffers_init(&buffers, mode, fd);
			}

			igt_subtest_f("%s-%s-%s-sanitycheck0%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basic0,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-sanitycheck1%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basic1,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-sanitycheckN%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basicN,
					      p->copy, h->hang);
			}

			/* try to overwrite the source values */
			igt_subtest_f("%s-%s-%s-overwrite-source-one%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__one,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_bcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_rcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-rev%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__rev,
					      p->copy, h->hang);
			}

			/* try to intermix copies with GPU copies*/
			igt_subtest_f("%s-%s-%s-intermix-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_rcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-intermix-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-intermix-both%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_both,
					      p->copy, h->hang);
			}

			/* try to read the results before the copy completes */
			igt_subtest_f("%s-%s-%s-early-read%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_early_read,
					      p->copy, h->hang);
			}

			/* concurrent reads */
			igt_subtest_f("%s-%s-%s-read-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-read-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_rcs,
					      p->copy, h->hang);
			}

			/* split copying between rings */
			igt_subtest_f("%s-%s-%s-write-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_write_read_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-write-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_write_read_rcs,
					      p->copy, h->hang);
			}

			/* and finally try to trick the kernel into losing the pending write */
			igt_subtest_f("%s-%s-%s-gpu-read-after-write%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_gpu_read_after_write,
					      p->copy, h->hang);
			}

			igt_fixture {
				buffers_fini(&buffers);
			}
		}
	}
}
Daniel Vetter43779e32013-08-14 14:50:50 +02001440
/* For one access mode: check its requirements and the memory budget
 * (2 x num_buffers objects of 1 MiB against allow_mem), then run the
 * full subtest matrix under each execution wrapper; the interruptible
 * and bomb variants run with the signal helper firing. */
static void
run_modes(const char *style, const struct access_mode *mode, unsigned allow_mem)
{
	if (mode->require && !mode->require())
		return;

	igt_debug("%s: using 2x%d buffers, each 1MiB\n",
		  style, num_buffers);
	if (!__intel_check_memory(2*num_buffers, 1024*1024, allow_mem,
				  NULL, NULL))
		return;

	run_basic_modes(style, mode, "", run_single);
	run_basic_modes(style, mode, "-child", run_child);
	run_basic_modes(style, mode, "-forked", run_forked);

	igt_fork_signal_helper();
	run_basic_modes(style, mode, "-interruptible", run_interruptible);
	run_basic_modes(style, mode, "-bomb", run_bomb);
	igt_stop_signal_helper();
}
1462
Daniel Vetter071e9ca2013-10-31 16:23:26 +01001463igt_main
Daniel Vetter43779e32013-08-14 14:50:50 +02001464{
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001465 const struct {
1466 const char *name;
1467 drm_intel_bo *(*create)(drm_intel_bufmgr *, uint64_t size);
1468 bool (*require)(void);
1469 } create[] = {
1470 { "", create_normal_bo, can_create_normal},
1471 { "private-", create_private_bo, can_create_private },
1472 { "stolen-", create_stolen_bo, can_create_stolen },
1473 { NULL, NULL }
1474 }, *c;
Chris Wilson42291f22016-01-07 11:19:26 +00001475 uint64_t pin_sz = 0;
1476 void *pinned = NULL;
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001477 int i;
Daniel Vetter43779e32013-08-14 14:50:50 +02001478
Daniel Vetter43779e32013-08-14 14:50:50 +02001479 igt_skip_on_simulation();
1480
Chris Wilson77633492015-03-26 08:11:43 +00001481 if (strstr(igt_test_name(), "all"))
1482 all = true;
1483
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001484 igt_fixture {
Micah Fedkec81d2932015-07-22 21:54:02 +00001485 fd = drm_open_driver(DRIVER_INTEL);
Chris Wilson5b675f72016-01-22 17:33:40 +00001486 intel_detect_and_clear_missed_interrupts(fd);
Chris Wilson6c428a62014-08-29 13:11:37 +01001487 devid = intel_get_drm_devid(fd);
1488 gen = intel_gen(devid);
Chris Wilson59c55622014-08-29 13:11:37 +01001489 rendercopy = igt_get_render_copyfunc(devid);
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001490 }
Daniel Vetter43779e32013-08-14 14:50:50 +02001491
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001492 for (c = create; c->name; c++) {
1493 char name[80];
1494
1495 create_func = c->create;
1496
Chris Wilson571b8762016-01-08 11:51:56 +00001497 num_buffers = MIN_BUFFERS;
1498 if (c->require()) {
1499 snprintf(name, sizeof(name), "%s%s", c->name, "tiny");
1500 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
1501 run_modes(name, &access_modes[i], CHECK_RAM);
1502 }
1503
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001504 igt_fixture {
1505 num_buffers = gem_mappable_aperture_size() / (1024 * 1024) / 4;
1506 }
1507
1508 if (c->require()) {
1509 snprintf(name, sizeof(name), "%s%s", c->name, "small");
1510 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001511 run_modes(name, &access_modes[i], CHECK_RAM);
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001512 }
1513
1514 igt_fixture {
1515 num_buffers = gem_mappable_aperture_size() / (1024 * 1024);
1516 }
1517
1518 if (c->require()) {
1519 snprintf(name, sizeof(name), "%s%s", c->name, "thrash");
1520 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001521 run_modes(name, &access_modes[i], CHECK_RAM);
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001522 }
1523
1524 igt_fixture {
1525 num_buffers = gem_aperture_size(fd) / (1024 * 1024);
1526 }
1527
1528 if (c->require()) {
1529 snprintf(name, sizeof(name), "%s%s", c->name, "full");
1530 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001531 run_modes(name, &access_modes[i], CHECK_RAM);
1532 }
1533
1534 igt_fixture {
1535 num_buffers = gem_mappable_aperture_size() / (1024 * 1024);
1536 pin_sz = intel_get_avail_ram_mb() - num_buffers;
1537
1538 igt_debug("Pinning %ld MiB\n", pin_sz);
1539 pin_sz *= 1024 * 1024;
1540
1541 if (posix_memalign(&pinned, 4096, pin_sz) ||
1542 mlock(pinned, pin_sz) ||
1543 madvise(pinned, pin_sz, MADV_DONTFORK)) {
1544 free(pinned);
1545 pinned = NULL;
1546 }
1547 igt_require(pinned);
1548 }
1549
1550 if (c->require()) {
1551 snprintf(name, sizeof(name), "%s%s", c->name, "swap");
1552 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
1553 run_modes(name, &access_modes[i], CHECK_RAM | CHECK_SWAP);
1554 }
1555
1556 igt_fixture {
1557 if (pinned) {
1558 munlock(pinned, pin_sz);
1559 free(pinned);
1560 pinned = NULL;
1561 }
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001562 }
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001563 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001564}