blob: cc0e4565c064b85ab80d83645de00c8bb760d8d4 [file] [log] [blame]
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001/*
2 * Copyright © 2009,2012,2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 *
28 */
29
Chris Wilson77633492015-03-26 08:11:43 +000030/** @file gem_concurrent.c
Daniel Vetter3dba47e2013-08-06 22:27:37 +020031 *
Chris Wilson77633492015-03-26 08:11:43 +000032 * This is a test of pread/pwrite/mmap behavior when writing to active
Daniel Vetter3dba47e2013-08-06 22:27:37 +020033 * buffers.
34 *
35 * Based on gem_gtt_concurrent_blt.
36 */
37
Thomas Wood804e11f2015-08-17 17:57:43 +010038#include "igt.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020039#include <stdlib.h>
40#include <stdio.h>
41#include <string.h>
Daniel Vetter3dba47e2013-08-06 22:27:37 +020042#include <fcntl.h>
43#include <inttypes.h>
44#include <errno.h>
45#include <sys/stat.h>
46#include <sys/time.h>
Chris Wilson99431a42013-08-14 11:03:34 +010047#include <sys/wait.h>
Daniel Vetterf5daeec2014-03-23 13:35:09 +010048
49#include <drm.h>
50
Daniel Vetter3dba47e2013-08-06 22:27:37 +020051#include "intel_bufmgr.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020052
Chris Wilson77633492015-03-26 08:11:43 +000053IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
Thomas Woodb2ac2642014-11-28 11:02:44 +000054 " buffers.");
55
/* Local copy of the userptr ioctl interface so this test still builds
 * against kernel/libdrm headers that predate I915_GEM_USERPTR. */
#define LOCAL_I915_GEM_USERPTR 0x33
#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
struct local_i915_gem_userptr {
	uint64_t user_ptr;	/* in: CPU address of the user allocation */
	uint64_t user_size;	/* in: size in bytes (page aligned) */
	uint32_t flags;
	uint32_t handle;	/* out: resulting GEM handle */
};

/* Shared test state: drm fd, PCI device id and gen, and the batchbuffer
 * used by the blit/render helpers. */
int fd, devid, gen;
struct intel_batchbuffer *batch;
int all;	/* NOTE(review): presumably enables the extended subtest set — set outside this chunk */
int pass;	/* current pass; offsets pixel() so each pass touches different bytes */

#define MIN_BUFFERS 3	/* presumably the smallest usable working set — confirm against callers */
71
Daniel Vetter3dba47e2013-08-06 22:27:37 +020072static void
Chris Wilsonf2a045f2015-01-02 16:33:33 +053073nop_release_bo(drm_intel_bo *bo)
74{
75 drm_intel_bo_unreference(bo);
76}
77
78static void
Daniel Vetter43779e32013-08-14 14:50:50 +020079prw_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +020080{
Chris Wilson0b4c33f2014-01-26 14:36:32 +000081 int size = width * height, i;
82 uint32_t *tmp;
Daniel Vetter3dba47e2013-08-06 22:27:37 +020083
Chris Wilson0b4c33f2014-01-26 14:36:32 +000084 tmp = malloc(4*size);
85 if (tmp) {
86 for (i = 0; i < size; i++)
87 tmp[i] = val;
88 drm_intel_bo_subdata(bo, 0, 4*size, tmp);
89 free(tmp);
90 } else {
91 for (i = 0; i < size; i++)
92 drm_intel_bo_subdata(bo, 4*i, 4, &val);
93 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +020094}
95
96static void
Chris Wilsonc12f2922014-08-31 16:14:40 +010097prw_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter3dba47e2013-08-06 22:27:37 +020098{
Chris Wilson0b4c33f2014-01-26 14:36:32 +000099 int size = width * height, i;
Chris Wilsonc12f2922014-08-31 16:14:40 +0100100 uint32_t *vaddr;
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200101
Chris Wilsonc12f2922014-08-31 16:14:40 +0100102 do_or_die(drm_intel_bo_map(tmp, true));
103 do_or_die(drm_intel_bo_get_subdata(bo, 0, 4*size, tmp->virtual));
104 vaddr = tmp->virtual;
105 for (i = 0; i < size; i++)
106 igt_assert_eq_u32(vaddr[i], val);
107 drm_intel_bo_unmap(tmp);
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200108}
109
Chris Wilson1c61c0f2016-01-08 10:51:09 +0000110#define pixel(y, width) ((y)*(width) + (((y) + pass)%(width)))
111
112static void
113partial_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
114{
115 int y;
116
117 for (y = 0; y < height; y++)
118 do_or_die(drm_intel_bo_subdata(bo, 4*pixel(y, width), 4, &val));
119}
120
121static void
122partial_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
123{
124 int y;
125
126 for (y = 0; y < height; y++) {
127 uint32_t buf;
128 do_or_die(drm_intel_bo_get_subdata(bo, 4*pixel(y, width), 4, &buf));
129 igt_assert_eq_u32(buf, val);
130 }
131}
132
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200133static drm_intel_bo *
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000134create_normal_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200135{
136 drm_intel_bo *bo;
137
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000138 bo = drm_intel_bo_alloc(bufmgr, "bo", size, 0);
Daniel Vetter83440952013-08-13 12:35:58 +0200139 igt_assert(bo);
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200140
141 return bo;
142}
143
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000144static bool can_create_normal(void)
145{
146 return true;
147}
148
149static drm_intel_bo *
150create_private_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
151{
152 drm_intel_bo *bo;
153 uint32_t handle;
154
155 /* XXX gem_create_with_flags(fd, size, I915_CREATE_PRIVATE); */
156
157 handle = gem_create(fd, size);
158 bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
159 gem_close(fd, handle);
160
161 return bo;
162}
163
164static bool can_create_private(void)
165{
166 return false;
167}
168
169static drm_intel_bo *
170create_stolen_bo(drm_intel_bufmgr *bufmgr, uint64_t size)
171{
172 drm_intel_bo *bo;
173 uint32_t handle;
174
175 /* XXX gem_create_with_flags(fd, size, I915_CREATE_STOLEN); */
176
177 handle = gem_create(fd, size);
178 bo = gem_handle_to_libdrm_bo(bufmgr, fd, "stolen", handle);
179 gem_close(fd, handle);
180
181 return bo;
182}
183
184static bool can_create_stolen(void)
185{
186 /* XXX check num_buffers against available stolen */
187 return false;
188}
189
190static drm_intel_bo *
191(*create_func)(drm_intel_bufmgr *bufmgr, uint64_t size);
192
Chris Wilson42291f22016-01-07 11:19:26 +0000193static bool create_cpu_require(void)
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000194{
Chris Wilson42291f22016-01-07 11:19:26 +0000195 return create_func != create_stolen_bo;
Chris Wilson1d6e5d32016-01-03 13:44:17 +0000196}
197
198static drm_intel_bo *
199unmapped_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
200{
201 return create_func(bufmgr, (uint64_t)4*width*height);
202}
203
Chris Wilson0143d4f2016-01-21 09:53:50 +0000204static bool create_snoop_require(void)
205{
206 if (!create_cpu_require())
207 return false;
208
209 return !gem_has_llc(fd);
210}
211
Chris Wilson46ec33e2015-10-20 14:40:50 +0100212static drm_intel_bo *
213snoop_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
214{
215 drm_intel_bo *bo;
216
217 igt_skip_on(gem_has_llc(fd));
218
219 bo = unmapped_create_bo(bufmgr, width, height);
220 gem_set_caching(fd, bo->handle, I915_CACHING_CACHED);
221 drm_intel_bo_disable_reuse(bo);
222
223 return bo;
224}
225
Chris Wilson0143d4f2016-01-21 09:53:50 +0000226static bool create_userptr_require(void)
227{
228 static int found = -1;
229 if (found < 0) {
230 struct drm_i915_gem_userptr arg;
231
232 found = 0;
233
234 memset(&arg, 0, sizeof(arg));
235 arg.user_ptr = -4096ULL;
236 arg.user_size = 8192;
237 errno = 0;
238 drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg);
239 if (errno == EFAULT) {
240 igt_assert(posix_memalign((void **)&arg.user_ptr,
241 4096, arg.user_size) == 0);
242 found = drmIoctl(fd,
243 LOCAL_IOCTL_I915_GEM_USERPTR,
244 &arg) == 0;
245 free((void *)(uintptr_t)arg.user_ptr);
246 }
247
248 }
249 return found;
250}
251
252static drm_intel_bo *
253userptr_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
254{
255 struct local_i915_gem_userptr userptr;
256 drm_intel_bo *bo;
257
258 memset(&userptr, 0, sizeof(userptr));
259 userptr.user_size = width * height * 4;
260 userptr.user_size = (userptr.user_size + 4095) & -4096;
261 igt_assert(posix_memalign((void **)&userptr.user_ptr,
262 4096, userptr.user_size) == 0);
263
264 do_or_die(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr));
265 bo = gem_handle_to_libdrm_bo(bufmgr, fd, "userptr", userptr.handle);
266 bo->virtual = (void *)(uintptr_t)userptr.user_ptr;
267
268 return bo;
269}
270
271static void
272userptr_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
273{
274 int size = width * height;
275 uint32_t *vaddr = bo->virtual;
276
277 gem_set_domain(fd, bo->handle,
278 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
279 while (size--)
280 *vaddr++ = val;
281}
282
283static void
284userptr_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
285{
286 int size = width * height;
287 uint32_t *vaddr = bo->virtual;
288
289 gem_set_domain(fd, bo->handle,
290 I915_GEM_DOMAIN_CPU, 0);
291 while (size--)
292 igt_assert_eq_u32(*vaddr++, val);
293}
294
295static void
296userptr_release_bo(drm_intel_bo *bo)
297{
298 free(bo->virtual);
299 bo->virtual = NULL;
300
301 drm_intel_bo_unreference(bo);
302}
303
Daniel Vetter43779e32013-08-14 14:50:50 +0200304static void
305gtt_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200306{
Chris Wilson3e766b82014-09-26 07:55:49 +0100307 uint32_t *vaddr = bo->virtual;
Daniel Vetter43779e32013-08-14 14:50:50 +0200308 int size = width * height;
Daniel Vetter43779e32013-08-14 14:50:50 +0200309
310 drm_intel_gem_bo_start_gtt_access(bo, true);
Daniel Vetter43779e32013-08-14 14:50:50 +0200311 while (size--)
312 *vaddr++ = val;
313}
314
315static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100316gtt_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter43779e32013-08-14 14:50:50 +0200317{
Chris Wilson3e766b82014-09-26 07:55:49 +0100318 uint32_t *vaddr = bo->virtual;
319 int y;
Daniel Vetter43779e32013-08-14 14:50:50 +0200320
Chris Wilson3e766b82014-09-26 07:55:49 +0100321 /* GTT access is slow. So we just compare a few points */
Daniel Vetter43779e32013-08-14 14:50:50 +0200322 drm_intel_gem_bo_start_gtt_access(bo, false);
Chris Wilson3e766b82014-09-26 07:55:49 +0100323 for (y = 0; y < height; y++)
Chris Wilson1c61c0f2016-01-08 10:51:09 +0000324 igt_assert_eq_u32(vaddr[pixel(y, width)], val);
Daniel Vetter43779e32013-08-14 14:50:50 +0200325}
326
327static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +0100328map_bo(drm_intel_bo *bo)
Daniel Vetter43779e32013-08-14 14:50:50 +0200329{
Daniel Vetter43779e32013-08-14 14:50:50 +0200330 /* gtt map doesn't have a write parameter, so just keep the mapping
331 * around (to avoid the set_domain with the gtt write domain set) and
332 * manually tell the kernel when we start access the gtt. */
333 do_or_die(drm_intel_gem_bo_map_gtt(bo));
334
335 return bo;
336}
337
Chris Wilson86055df2014-08-29 17:36:29 +0100338static drm_intel_bo *
339tile_bo(drm_intel_bo *bo, int width)
340{
341 uint32_t tiling = I915_TILING_X;
342 uint32_t stride = width * 4;
343
344 do_or_die(drm_intel_bo_set_tiling(bo, &tiling, stride));
345
346 return bo;
347}
348
349static drm_intel_bo *
350gtt_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
351{
352 return map_bo(unmapped_create_bo(bufmgr, width, height));
353}
354
355static drm_intel_bo *
356gttX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
357{
358 return tile_bo(gtt_create_bo(bufmgr, width, height), width);
359}
360
361static drm_intel_bo *
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530362wc_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
363{
364 drm_intel_bo *bo;
365
Daniel Vettera3e34ce2015-02-06 11:05:28 +0100366 gem_require_mmap_wc(fd);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530367
368 bo = unmapped_create_bo(bufmgr, width, height);
Ville Syrjäläb8a77dd2015-10-09 18:29:28 +0300369 bo->virtual = __gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530370 return bo;
371}
372
373static void
374wc_release_bo(drm_intel_bo *bo)
375{
376 munmap(bo->virtual, bo->size);
377 bo->virtual = NULL;
378
379 nop_release_bo(bo);
380}
381
382static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +0100383gpu_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
384{
385 return unmapped_create_bo(bufmgr, width, height);
386}
387
Chris Wilson86055df2014-08-29 17:36:29 +0100388static drm_intel_bo *
389gpuX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
390{
391 return tile_bo(gpu_create_bo(bufmgr, width, height), width);
392}
393
Daniel Vetter43779e32013-08-14 14:50:50 +0200394static void
395cpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
396{
397 int size = width * height;
398 uint32_t *vaddr;
399
400 do_or_die(drm_intel_bo_map(bo, true));
401 vaddr = bo->virtual;
402 while (size--)
403 *vaddr++ = val;
404 drm_intel_bo_unmap(bo);
405}
406
407static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100408cpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter43779e32013-08-14 14:50:50 +0200409{
410 int size = width * height;
411 uint32_t *vaddr;
412
413 do_or_die(drm_intel_bo_map(bo, false));
414 vaddr = bo->virtual;
415 while (size--)
Chris Wilson6c428a62014-08-29 13:11:37 +0100416 igt_assert_eq_u32(*vaddr++, val);
Daniel Vetter43779e32013-08-14 14:50:50 +0200417 drm_intel_bo_unmap(bo);
418}
419
/* Fill @bo with @val entirely on the GPU: hand-roll an XY_COLOR_BLT batch,
 * upload it via pwrite into a throwaway batch bo and execute it on the
 * blitter. The dword layout below is generation-dependent, so the order of
 * emission must not be disturbed. */
static void
gpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	struct drm_i915_gem_relocation_entry reloc[1];
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_pwrite gem_pwrite;
	struct drm_i915_gem_create create;
	uint32_t buf[10], *b;
	uint32_t tiling, swizzle;

	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

	memset(reloc, 0, sizeof(reloc));
	memset(gem_exec, 0, sizeof(gem_exec));
	memset(&execbuf, 0, sizeof(execbuf));

	/* Command dword: length is 5 on gen8+ (64bit reloc), else 4. */
	b = buf;
	*b++ = XY_COLOR_BLT_CMD_NOLEN |
		((gen >= 8) ? 5 : 4) |
		COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
	/* Pitch is in dwords for tiled surfaces (gen4+), bytes for linear. */
	if (gen >= 4 && tiling) {
		b[-1] |= XY_COLOR_BLT_TILED;
		*b = width;
	} else
		*b = width << 2;
	/* BR13: raster op 0xf0 (PATCOPY), 32bpp colour format. */
	*b++ |= 0xf0 << 16 | 1 << 25 | 1 << 24;
	*b++ = 0;			/* dst x1,y1 */
	*b++ = height << 16 | width;	/* dst x2,y2 */
	/* Relocation for the destination address (next dword(s)). */
	reloc[0].offset = (b - buf) * sizeof(uint32_t);
	reloc[0].target_handle = bo->handle;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
	*b++ = 0;
	if (gen >= 8)			/* upper 32 bits of the address */
		*b++ = 0;
	*b++ = val;			/* solid fill colour */
	*b++ = MI_BATCH_BUFFER_END;
	if ((b - buf) & 1)		/* batches must be qword aligned */
		*b++ = 0;

	gem_exec[0].handle = bo->handle;
	gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;

	/* Throwaway 4k bo to hold the batch itself. */
	create.handle = 0;
	create.size = 4096;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	gem_exec[1].handle = create.handle;
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)reloc;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(buf[0]);
	if (gen >= 6)			/* dedicated blitter ring from gen6 */
		execbuf.flags = I915_EXEC_BLT;

	gem_pwrite.handle = gem_exec[1].handle;
	gem_pwrite.offset = 0;
	gem_pwrite.size = execbuf.batch_len;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
	do_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

	/* handle is the first member of drm_gem_close, so this aliases safely */
	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &create.handle);
}
486
487static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100488gpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Chris Wilson6c428a62014-08-29 13:11:37 +0100489{
Chris Wilson99b5ee82015-01-22 10:03:45 +0000490 intel_blt_copy(batch,
491 bo, 0, 0, 4*width,
492 tmp, 0, 0, 4*width,
493 width, height, 32);
Chris Wilsonc12f2922014-08-31 16:14:40 +0100494 cpu_cmp_bo(tmp, val, width, height, NULL);
Chris Wilson6c428a62014-08-29 13:11:37 +0100495}
496
/* Table of every way this test can create, fill and verify a bo. Each
 * subtest group picks one entry; .require (optional) gates the group,
 * .set_bo/.cmp_bo do the CPU-or-GPU-side writes and checks, and
 * .release_bo undoes whatever .create_bo set up. */
const struct access_mode {
	const char *name;	/* subtest name component */
	bool (*require)(void);	/* optional: skip group when false */
	void (*set_bo)(drm_intel_bo *bo, uint32_t val, int w, int h);
	void (*cmp_bo)(drm_intel_bo *bo, uint32_t val, int w, int h, drm_intel_bo *tmp);
	drm_intel_bo *(*create_bo)(drm_intel_bufmgr *bufmgr, int width, int height);
	void (*release_bo)(drm_intel_bo *bo);
} access_modes[] = {
	{
		/* pread/pwrite on the whole object */
		.name = "prw",
		.set_bo = prw_set_bo,
		.cmp_bo = prw_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* pread/pwrite of one dword per row */
		.name = "partial",
		.set_bo = partial_set_bo,
		.cmp_bo = partial_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* CPU mmap (requires CPU-accessible backing storage) */
		.name = "cpu",
		.require = create_cpu_require,
		.set_bo = cpu_set_bo,
		.cmp_bo = cpu_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* CPU mmap of a snooped bo (non-LLC machines only) */
		.name = "snoop",
		.require = create_snoop_require,
		.set_bo = cpu_set_bo,
		.cmp_bo = cpu_cmp_bo,
		.create_bo = snoop_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* userptr bo backed by our own malloc'ed pages */
		.name = "userptr",
		.require = create_userptr_require,
		.set_bo = userptr_set_bo,
		.cmp_bo = userptr_cmp_bo,
		.create_bo = userptr_create_bo,
		.release_bo = userptr_release_bo,
	},
	{
		/* persistent GTT mmap, linear */
		.name = "gtt",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gtt_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* persistent GTT mmap, X-tiled */
		.name = "gttX",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gttX_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* write-combining CPU mmap */
		.name = "wc",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = wc_create_bo,
		.release_bo = wc_release_bo,
	},
	{
		/* all access via the GPU (blitter), linear */
		.name = "gpu",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpu_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* all access via the GPU (blitter), X-tiled */
		.name = "gpuX",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpuX_create_bo,
		.release_bo = nop_release_bo,
	},
};
579
/* Number of src/dst pairs per working set — sized outside this chunk. */
int num_buffers;
/* Fixed geometry for every surface in the test. */
const int width = 512, height = 512;
/* Generation-specific render-copy routine (may be NULL if unsupported). */
igt_render_copyfunc_t rendercopy;

/* Working set for one access mode: parallel source/destination arrays
 * plus two scratch bos used by the cmp/copy helpers. */
struct buffers {
	const struct access_mode *mode;	/* how to create/fill/check bos */
	drm_intel_bufmgr *bufmgr;	/* owning buffer manager */
	drm_intel_bo **src, **dst;	/* single allocation: dst = src + num_buffers */
	drm_intel_bo *dummy, *spare;	/* scratch bos */
	int count;			/* valid src/dst pairs */
};
591
592static void *buffers_init(struct buffers *data,
593 const struct access_mode *mode,
594 int _fd)
595{
596 data->mode = mode;
597 data->count = 0;
598
599 data->bufmgr = drm_intel_bufmgr_gem_init(_fd, 4096);
600 igt_assert(data->bufmgr);
601
Chris Wilson2d08e9e2015-12-11 09:25:03 +0000602 data->src = malloc(2*sizeof(drm_intel_bo *)*num_buffers);
603 igt_assert(data->src);
604 data->dst = data->src + num_buffers;
605
Chris Wilson99b5ee82015-01-22 10:03:45 +0000606 drm_intel_bufmgr_gem_enable_reuse(data->bufmgr);
607 return intel_batchbuffer_alloc(data->bufmgr, devid);
608}
609
610static void buffers_destroy(struct buffers *data)
611{
612 if (data->count == 0)
613 return;
614
615 for (int i = 0; i < data->count; i++) {
616 data->mode->release_bo(data->src[i]);
617 data->mode->release_bo(data->dst[i]);
618 }
619 data->mode->release_bo(data->dummy);
Chris Wilson35b0ac92015-03-16 11:55:46 +0000620 data->mode->release_bo(data->spare);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000621 data->count = 0;
622}
623
624static void buffers_create(struct buffers *data,
625 int count)
626{
627 igt_assert(data->bufmgr);
628
629 buffers_destroy(data);
630
631 for (int i = 0; i < count; i++) {
632 data->src[i] =
633 data->mode->create_bo(data->bufmgr, width, height);
634 data->dst[i] =
635 data->mode->create_bo(data->bufmgr, width, height);
636 }
637 data->dummy = data->mode->create_bo(data->bufmgr, width, height);
Chris Wilson35b0ac92015-03-16 11:55:46 +0000638 data->spare = data->mode->create_bo(data->bufmgr, width, height);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000639 data->count = count;
640}
641
642static void buffers_fini(struct buffers *data)
643{
644 if (data->bufmgr == NULL)
645 return;
646
647 buffers_destroy(data);
648
Chris Wilson2d08e9e2015-12-11 09:25:03 +0000649 free(data->src);
650 data->src = NULL;
651 data->dst = NULL;
652
Chris Wilson99b5ee82015-01-22 10:03:45 +0000653 intel_batchbuffer_free(batch);
654 drm_intel_bufmgr_destroy(data->bufmgr);
655 data->bufmgr = NULL;
656}
657
/* A copy routine under test, and an optional hang injector. */
typedef void (*do_copy)(drm_intel_bo *dst, drm_intel_bo *src);
typedef struct igt_hang_ring (*do_hang)(void);

/* Full-surface copy on the render engine, preserving each bo's tiling. */
static void render_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	struct igt_buf d = {
		.bo = dst,
		.size = width * height * 4,
		.num_tiles = width * height * 4,
		.stride = width * 4,
	}, s = {
		.bo = src,
		.size = width * height * 4,
		.num_tiles = width * height * 4,
		.stride = width * 4,
	};
	uint32_t swizzle;

	/* igt_buf carries the tiling mode; query it from each bo. */
	drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
	drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);

	rendercopy(batch, NULL,
		   &s, 0, 0,
		   width, height,
		   &d, 0, 0);
}
684
685static void blt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
686{
Chris Wilson86055df2014-08-29 17:36:29 +0100687 intel_blt_copy(batch,
688 src, 0, 0, 4*width,
689 dst, 0, 0, 4*width,
690 width, height, 32);
Chris Wilson59c55622014-08-29 13:11:37 +0100691}
Daniel Vetter5a598c92013-08-14 15:08:05 +0200692
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530693static void cpu_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
694{
695 const int size = width * height * sizeof(uint32_t);
696 void *d, *s;
697
698 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
699 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
Ville Syrjäläf52e7ec2015-10-09 19:11:39 +0300700 s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
701 d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530702
703 memcpy(d, s, size);
704
705 munmap(d, size);
706 munmap(s, size);
707}
708
709static void gtt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
710{
711 const int size = width * height * sizeof(uint32_t);
712 void *d, *s;
713
714 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
715 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
716
Ville Syrjäläf52e7ec2015-10-09 19:11:39 +0300717 s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
718 d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530719
720 memcpy(d, s, size);
721
722 munmap(d, size);
723 munmap(s, size);
724}
725
726static void wc_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
727{
728 const int size = width * height * sizeof(uint32_t);
729 void *d, *s;
730
731 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
732 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
733
Ville Syrjäläf52e7ec2015-10-09 19:11:39 +0300734 s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
735 d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530736
737 memcpy(d, s, size);
738
739 munmap(d, size);
740 munmap(s, size);
741}
742
Chris Wilson16bafdf2014-09-04 09:26:24 +0100743static struct igt_hang_ring no_hang(void)
744{
745 return (struct igt_hang_ring){0, 0};
746}
747
748static struct igt_hang_ring bcs_hang(void)
749{
Daniel Vetter3cd45de2015-02-10 17:46:43 +0100750 return igt_hang_ring(fd, I915_EXEC_BLT);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100751}
752
753static struct igt_hang_ring rcs_hang(void)
754{
Daniel Vetter3cd45de2015-02-10 17:46:43 +0100755 return igt_hang_ring(fd, I915_EXEC_RENDER);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100756}
757
Chris Wilson8bf09f32015-12-17 09:16:42 +0000758static void do_basic0(struct buffers *buffers,
759 do_copy do_copy_func,
760 do_hang do_hang_func)
761{
762 gem_quiescent_gpu(fd);
763
764 buffers->mode->set_bo(buffers->src[0], 0xdeadbeef, width, height);
765 for (int i = 0; i < buffers->count; i++) {
766 struct igt_hang_ring hang = do_hang_func();
767
768 do_copy_func(buffers->dst[i], buffers->src[0]);
769 buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef, width, height, buffers->dummy);
770
771 igt_post_hang_ring(fd, hang);
772 }
773}
774
775static void do_basic1(struct buffers *buffers,
776 do_copy do_copy_func,
777 do_hang do_hang_func)
Chris Wilson197db862015-12-09 20:54:10 +0000778{
779 gem_quiescent_gpu(fd);
780
781 for (int i = 0; i < buffers->count; i++) {
782 struct igt_hang_ring hang = do_hang_func();
783
784 buffers->mode->set_bo(buffers->src[i], i, width, height);
785 buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
Chris Wilson8bf09f32015-12-17 09:16:42 +0000786
Chris Wilson197db862015-12-09 20:54:10 +0000787 do_copy_func(buffers->dst[i], buffers->src[i]);
Chris Wilson8bf09f32015-12-17 09:16:42 +0000788 usleep(0); /* let someone else claim the mutex */
Chris Wilson197db862015-12-09 20:54:10 +0000789 buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
790
791 igt_post_hang_ring(fd, hang);
792 }
793}
794
Chris Wilson8bf09f32015-12-17 09:16:42 +0000795static void do_basicN(struct buffers *buffers,
796 do_copy do_copy_func,
797 do_hang do_hang_func)
798{
799 struct igt_hang_ring hang;
800
801 gem_quiescent_gpu(fd);
802
803 for (int i = 0; i < buffers->count; i++) {
804 buffers->mode->set_bo(buffers->src[i], i, width, height);
805 buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
806 }
807
808 hang = do_hang_func();
809
810 for (int i = 0; i < buffers->count; i++) {
811 do_copy_func(buffers->dst[i], buffers->src[i]);
812 usleep(0); /* let someone else claim the mutex */
813 }
814
815 for (int i = 0; i < buffers->count; i++)
816 buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
817
818 igt_post_hang_ring(fd, hang);
819}
820
Chris Wilson99b5ee82015-01-22 10:03:45 +0000821static void do_overwrite_source(struct buffers *buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +0100822 do_copy do_copy_func,
823 do_hang do_hang_func)
Daniel Vetter5a598c92013-08-14 15:08:05 +0200824{
Chris Wilson16bafdf2014-09-04 09:26:24 +0100825 struct igt_hang_ring hang;
Daniel Vetter5a598c92013-08-14 15:08:05 +0200826 int i;
827
828 gem_quiescent_gpu(fd);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000829 for (i = 0; i < buffers->count; i++) {
830 buffers->mode->set_bo(buffers->src[i], i, width, height);
831 buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
Daniel Vetter5a598c92013-08-14 15:08:05 +0200832 }
Chris Wilson99b5ee82015-01-22 10:03:45 +0000833 for (i = 0; i < buffers->count; i++)
834 do_copy_func(buffers->dst[i], buffers->src[i]);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100835 hang = do_hang_func();
Chris Wilson99b5ee82015-01-22 10:03:45 +0000836 for (i = buffers->count; i--; )
837 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
838 for (i = 0; i < buffers->count; i++)
839 buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100840 igt_post_hang_ring(fd, hang);
841}
842
Chris Wilsona1726762015-03-16 16:29:57 +0000843static void do_overwrite_source_read(struct buffers *buffers,
844 do_copy do_copy_func,
845 do_hang do_hang_func,
846 int do_rcs)
847{
848 const int half = buffers->count/2;
849 struct igt_hang_ring hang;
850 int i;
851
852 gem_quiescent_gpu(fd);
853 for (i = 0; i < half; i++) {
854 buffers->mode->set_bo(buffers->src[i], i, width, height);
855 buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
856 buffers->mode->set_bo(buffers->dst[i+half], ~i, width, height);
857 }
858 for (i = 0; i < half; i++) {
859 do_copy_func(buffers->dst[i], buffers->src[i]);
860 if (do_rcs)
861 render_copy_bo(buffers->dst[i+half], buffers->src[i]);
862 else
863 blt_copy_bo(buffers->dst[i+half], buffers->src[i]);
864 }
865 hang = do_hang_func();
866 for (i = half; i--; )
867 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
868 for (i = 0; i < half; i++) {
869 buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
870 buffers->mode->cmp_bo(buffers->dst[i+half], i, width, height, buffers->dummy);
871 }
872 igt_post_hang_ring(fd, hang);
873}
874
875static void do_overwrite_source_read_bcs(struct buffers *buffers,
876 do_copy do_copy_func,
877 do_hang do_hang_func)
878{
879 do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 0);
880}
881
882static void do_overwrite_source_read_rcs(struct buffers *buffers,
883 do_copy do_copy_func,
884 do_hang do_hang_func)
885{
886 do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 1);
887}
888
Chris Wilson99b5ee82015-01-22 10:03:45 +0000889static void do_overwrite_source__rev(struct buffers *buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +0100890 do_copy do_copy_func,
891 do_hang do_hang_func)
892{
893 struct igt_hang_ring hang;
894 int i;
895
896 gem_quiescent_gpu(fd);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000897 for (i = 0; i < buffers->count; i++) {
898 buffers->mode->set_bo(buffers->src[i], i, width, height);
899 buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100900 }
Chris Wilson99b5ee82015-01-22 10:03:45 +0000901 for (i = 0; i < buffers->count; i++)
902 do_copy_func(buffers->dst[i], buffers->src[i]);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100903 hang = do_hang_func();
Chris Wilson99b5ee82015-01-22 10:03:45 +0000904 for (i = 0; i < buffers->count; i++)
905 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
906 for (i = buffers->count; i--; )
907 buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100908 igt_post_hang_ring(fd, hang);
909}
910
/* Minimal single-buffer version of the overwrite-source test: copy one
 * buffer, overwrite the source, and check the destination kept the value
 * from before the overwrite. */
static void do_overwrite_source__one(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);
	/* src = 0, dst = ~0, then copy src -> dst */
	buffers->mode->set_bo(buffers->src[0], 0, width, height);
	buffers->mode->set_bo(buffers->dst[0], ~0, width, height);
	do_copy_func(buffers->dst[0], buffers->src[0]);
	hang = do_hang_func();
	/* Clobber the source after queuing the copy; dst must still read 0. */
	buffers->mode->set_bo(buffers->src[0], 0xdeadbeef, width, height);
	buffers->mode->cmp_bo(buffers->dst[0], 0, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
926
/* Interleave the copy-under-test (do_copy_func) with GPU copies on the
 * blitter and/or render engines, bouncing data through dst[i+half] and back
 * into dst[i], then verify the round-trip preserved the source pattern.
 *
 * do_rcs selects the GPU engine: 1 = render only, 0 = blitter only,
 * -1 = alternate between the two based on the loop index parity.
 */
static void do_intermix(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func,
			int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef^~i, width, height);
		buffers->mode->set_bo(buffers->dst[i], i, width, height);
	}
	for (i = 0; i < half; i++) {
		/* GPU copy src[i] -> dst[i] (engine chosen by do_rcs) */
		if (do_rcs == 1 || (do_rcs == -1 && i & 1))
			render_copy_bo(buffers->dst[i], buffers->src[i]);
		else
			blt_copy_bo(buffers->dst[i], buffers->src[i]);

		/* copy-under-test stashes src[i] in the second half */
		do_copy_func(buffers->dst[i+half], buffers->src[i]);

		/* GPU copies it back, using the opposite parity when mixing */
		if (do_rcs == 1 || (do_rcs == -1 && (i & 1) == 0))
			render_copy_bo(buffers->dst[i], buffers->dst[i+half]);
		else
			blt_copy_bo(buffers->dst[i], buffers->dst[i+half]);

		/* finally overwrite the staging slot from src[i+half] */
		do_copy_func(buffers->dst[i+half], buffers->src[i+half]);
	}
	hang = do_hang_func();
	/* Every destination must end up with its source's pattern. */
	for (i = 0; i < 2*half; i++)
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef^~i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
961
/* Intermix test using only the render engine for the GPU copies. */
static void do_intermix_rcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 1);
}
968
/* Intermix test using only the blitter for the GPU copies. */
static void do_intermix_bcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 0);
}
975
/* Intermix test alternating between blitter and render engines. */
static void do_intermix_both(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, -1);
}
982
Chris Wilson99b5ee82015-01-22 10:03:45 +0000983static void do_early_read(struct buffers *buffers,
Chris Wilson16bafdf2014-09-04 09:26:24 +0100984 do_copy do_copy_func,
985 do_hang do_hang_func)
Daniel Vetter5a598c92013-08-14 15:08:05 +0200986{
Chris Wilson16bafdf2014-09-04 09:26:24 +0100987 struct igt_hang_ring hang;
Daniel Vetter5a598c92013-08-14 15:08:05 +0200988 int i;
989
990 gem_quiescent_gpu(fd);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000991 for (i = buffers->count; i--; )
992 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
993 for (i = 0; i < buffers->count; i++)
994 do_copy_func(buffers->dst[i], buffers->src[i]);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100995 hang = do_hang_func();
Chris Wilson99b5ee82015-01-22 10:03:45 +0000996 for (i = buffers->count; i--; )
997 buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef, width, height, buffers->dummy);
Chris Wilson16bafdf2014-09-04 09:26:24 +0100998 igt_post_hang_ring(fd, hang);
Daniel Vetter5a598c92013-08-14 15:08:05 +0200999}
1000
/* Issue two concurrent reads of each source: the copy-under-test into dst[i]
 * and a blitter copy into the shared spare. Afterwards the spare must hold
 * the last source's pattern and every dst must match its source. */
static void do_read_read_bcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		blt_copy_bo(buffers->spare, buffers->src[i]);
	}
	/* spare was last written from src[count-1] */
	buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1021
Chris Wilson0c266522015-11-11 16:37:16 +00001022static void do_write_read_bcs(struct buffers *buffers,
1023 do_copy do_copy_func,
1024 do_hang do_hang_func)
1025{
1026 struct igt_hang_ring hang;
1027 int i;
1028
1029 gem_quiescent_gpu(fd);
1030 for (i = buffers->count; i--; )
1031 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
1032 for (i = 0; i < buffers->count; i++) {
1033 blt_copy_bo(buffers->spare, buffers->src[i]);
1034 do_copy_func(buffers->dst[i], buffers->spare);
1035 }
1036 hang = do_hang_func();
1037 for (i = buffers->count; i--; )
1038 buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
1039 igt_post_hang_ring(fd, hang);
1040}
1041
/* Same as do_read_read_bcs but with the render engine providing the second
 * concurrent reader into the shared spare. */
static void do_read_read_rcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		render_copy_bo(buffers->spare, buffers->src[i]);
	}
	/* spare was last written from src[count-1] */
	buffers->mode->cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, buffers->dummy);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1062
Chris Wilson0c266522015-11-11 16:37:16 +00001063static void do_write_read_rcs(struct buffers *buffers,
1064 do_copy do_copy_func,
1065 do_hang do_hang_func)
1066{
1067 struct igt_hang_ring hang;
1068 int i;
1069
1070 gem_quiescent_gpu(fd);
1071 for (i = buffers->count; i--; )
1072 buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
1073 for (i = 0; i < buffers->count; i++) {
1074 render_copy_bo(buffers->spare, buffers->src[i]);
1075 do_copy_func(buffers->dst[i], buffers->spare);
1076 }
1077 hang = do_hang_func();
1078 for (i = buffers->count; i--; )
1079 buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
1080 igt_post_hang_ring(fd, hang);
1081}
1082
/* Write all destinations via the copy-under-test, then queue GPU reads of
 * those destinations (into dummy) before verifying — trying to trick the
 * kernel into losing the pending write amongst the reads. */
static void do_gpu_read_after_write(struct buffers *buffers,
				    do_copy do_copy_func,
				    do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xabcdabcd, width, height);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	/* immediately re-read each freshly written dst on the GPU */
	for (i = buffers->count; i--; )
		do_copy_func(buffers->dummy, buffers->dst[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xabcdabcd, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
1102
/* A single test scenario, parameterised by the copy and hang injectors. */
typedef void (*do_test)(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func);

/* A harness that decides how a scenario is executed (once, repeated under
 * signals, forked across children, ...). */
typedef void (*run_wrap)(struct buffers *buffers,
			 do_test do_test_func,
			 do_copy do_copy_func,
			 do_hang do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +02001111
/* Run the scenario exactly once, then assert no interrupts were missed. */
static void run_single(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	do_test_func(buffers, do_copy_func, do_hang_func);
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
1120
/* Run the scenario 10 times (the caller wraps this in the signal helper so
 * each pass is bombarded with interrupts), updating the global `pass`
 * counter, then assert no interrupts were missed. */
static void run_interruptible(struct buffers *buffers,
			      do_test do_test_func,
			      do_copy do_copy_func,
			      do_hang do_hang_func)
{
	for (pass = 0; pass < 10; pass++)
		do_test_func(buffers, do_copy_func, do_hang_func);
	pass = 0;
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
1131
/* Fork num_children processes, each running the scenario `loops` times on
 * its own private fd/batch/buffer set. The global num_buffers is scaled
 * down so the children collectively use roughly the configured amount, and
 * is restored before returning. */
static void __run_forked(struct buffers *buffers,
			 int num_children, int loops,
			 do_test do_test_func,
			 do_copy do_copy_func,
			 do_hang do_hang_func)

{
	const int old_num_buffers = num_buffers;

	/* split the buffer budget between the children */
	num_buffers /= num_children;
	num_buffers += MIN_BUFFERS;

	igt_fork(child, num_children) {
		/* recreate process local variables */
		buffers->count = 0;
		fd = drm_open_driver(DRIVER_INTEL);

		batch = buffers_init(buffers, buffers->mode, fd);

		buffers_create(buffers, num_buffers);
		for (pass = 0; pass < loops; pass++)
			do_test_func(buffers, do_copy_func, do_hang_func);
		pass = 0;

		buffers_fini(buffers);
	}

	igt_waitchildren();
	igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);

	num_buffers = old_num_buffers;
}
Daniel Vetter5a598c92013-08-14 15:08:05 +02001164
/* One forked child per CPU, 10 loops each. */
static void run_forked(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	__run_forked(buffers, sysconf(_SC_NPROCESSORS_ONLN), 10,
		     do_test_func, do_copy_func, do_hang_func);
}
1173
/* Oversubscribe the machine: 8 forked children per CPU, 10 loops each. */
static void run_bomb(struct buffers *buffers,
		     do_test do_test_func,
		     do_copy do_copy_func,
		     do_hang do_hang_func)
{
	__run_forked(buffers, 8*sysconf(_SC_NPROCESSORS_ONLN), 10,
		     do_test_func, do_copy_func, do_hang_func);
}
1182
/* Skip unless the physical swizzle mode matches the reported one, i.e. the
 * kernel does not need bit17 swizzle fixups that CPU access cannot follow.
 * Uses a local extended copy of the get_tiling ioctl struct so this builds
 * against older libdrm headers lacking phys_swizzle_mode. */
static void bit17_require(void)
{
	struct drm_i915_gem_get_tiling2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} arg;
#define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)

	memset(&arg, 0, sizeof(arg));
	/* probe with a throwaway X-tiled object */
	arg.handle = gem_create(fd, 4096);
	gem_set_tiling(fd, arg.handle, I915_TILING_X, 512);

	do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg);
	gem_close(fd, arg.handle);
	igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);
}
1201
/* CPU-mmap pipeline requirement: needs sane (non-bit17) swizzling. */
static void cpu_require(void)
{
	bit17_require();
}
1206
/* GTT-mmap pipeline has no extra requirements. */
static void gtt_require(void)
{
}
1210
/* WC-mmap pipeline: needs non-bit17 swizzling and kernel mmap(wc) support. */
static void wc_require(void)
{
	bit17_require();
	gem_require_mmap_wc(fd);
}
1216
/* Blitter pipeline has no extra requirements. */
static void bcs_require(void)
{
}
1220
/* Render pipeline: needs a rendercopy implementation for this GPU gen. */
static void rcs_require(void)
{
	igt_require(rendercopy);
}
1225
/* Enumerate every subtest for one access mode: the cross product of copy
 * pipelines (cpu/gtt/wc/blt/render), hang injection modes, and scenarios,
 * each executed through the given run wrapper. Unless running as "all",
 * the CPU-mapping pipelines and the hang variants are skipped (pskip
 * starts at the "blt" entry, three elements in). */
static void
run_basic_modes(const char *prefix,
		const struct access_mode *mode,
		const char *suffix,
		run_wrap run_wrap_func)
{
	const struct {
		const char *prefix;
		do_copy copy;
		void (*require)(void);
	} pipelines[] = {
		{ "cpu", cpu_copy_bo, cpu_require },
		{ "gtt", gtt_copy_bo, gtt_require },
		{ "wc", wc_copy_bo, wc_require },
		{ "blt", blt_copy_bo, bcs_require },
		{ "render", render_copy_bo, rcs_require },
		{ NULL, NULL }
	}, *pskip = pipelines + 3, *p;
	const struct {
		const char *suffix;
		do_hang hang;
	} hangs[] = {
		{ "", no_hang },
		{ "-hang-blt", bcs_hang },
		{ "-hang-render", rcs_hang },
		{ NULL, NULL },
	}, *h;
	struct buffers buffers;

	for (h = hangs; h->suffix; h++) {
		/* hang variants only run in the "all" configuration */
		if (!all && *h->suffix)
			continue;

		for (p = all ? pipelines : pskip; p->prefix; p++) {
			igt_fixture {
				batch = buffers_init(&buffers, mode, fd);
			}

			igt_subtest_f("%s-%s-%s-sanitycheck0%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basic0,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-sanitycheck1%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basic1,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-sanitycheckN%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers, do_basicN,
					      p->copy, h->hang);
			}

			/* try to overwrite the source values */
			igt_subtest_f("%s-%s-%s-overwrite-source-one%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__one,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_bcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_rcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-%s-overwrite-source-rev%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__rev,
					      p->copy, h->hang);
			}

			/* try to intermix copies with GPU copies*/
			igt_subtest_f("%s-%s-%s-intermix-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_rcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-intermix-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-intermix-both%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_both,
					      p->copy, h->hang);
			}

			/* try to read the results before the copy completes */
			igt_subtest_f("%s-%s-%s-early-read%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_early_read,
					      p->copy, h->hang);
			}

			/* concurrent reads */
			igt_subtest_f("%s-%s-%s-read-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-read-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_rcs,
					      p->copy, h->hang);
			}

			/* split copying between rings */
			igt_subtest_f("%s-%s-%s-write-read-bcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_write_read_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-%s-write-read-rcs%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_write_read_rcs,
					      p->copy, h->hang);
			}

			/* and finally try to trick the kernel into losing the pending write */
			igt_subtest_f("%s-%s-%s-gpu-read-after-write%s%s", prefix, mode->name, p->prefix, suffix, h->suffix) {
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_gpu_read_after_write,
					      p->copy, h->hang);
			}

			igt_fixture {
				buffers_fini(&buffers);
			}
		}
	}
}
Daniel Vetter43779e32013-08-14 14:50:50 +02001411
/* Run every wrapper flavour for one access mode, provided the mode's
 * requirements are met and the machine has memory for 2*num_buffers
 * one-MiB objects (allow_mem selects RAM and/or swap accounting). */
static void
run_modes(const char *style, const struct access_mode *mode, unsigned allow_mem)
{
	if (mode->require && !mode->require())
		return;

	igt_debug("%s: using 2x%d buffers, each 1MiB\n",
		  style, num_buffers);
	if (!__intel_check_memory(2*num_buffers, 1024*1024, allow_mem,
				  NULL, NULL))
		return;

	run_basic_modes(style, mode, "", run_single);
	run_basic_modes(style, mode, "-forked", run_forked);

	/* the signal helper peppers the remaining runs with interrupts */
	igt_fork_signal_helper();
	run_basic_modes(style, mode, "-interruptible", run_interruptible);
	run_basic_modes(style, mode, "-bomb", run_bomb);
	igt_stop_signal_helper();
}
1432
Daniel Vetter071e9ca2013-10-31 16:23:26 +01001433igt_main
Daniel Vetter43779e32013-08-14 14:50:50 +02001434{
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001435 const struct {
1436 const char *name;
1437 drm_intel_bo *(*create)(drm_intel_bufmgr *, uint64_t size);
1438 bool (*require)(void);
1439 } create[] = {
1440 { "", create_normal_bo, can_create_normal},
1441 { "private-", create_private_bo, can_create_private },
1442 { "stolen-", create_stolen_bo, can_create_stolen },
1443 { NULL, NULL }
1444 }, *c;
Chris Wilson42291f22016-01-07 11:19:26 +00001445 uint64_t pin_sz = 0;
1446 void *pinned = NULL;
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001447 int i;
Daniel Vetter43779e32013-08-14 14:50:50 +02001448
Daniel Vetter43779e32013-08-14 14:50:50 +02001449 igt_skip_on_simulation();
1450
Chris Wilson77633492015-03-26 08:11:43 +00001451 if (strstr(igt_test_name(), "all"))
1452 all = true;
1453
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001454 igt_fixture {
Micah Fedkec81d2932015-07-22 21:54:02 +00001455 fd = drm_open_driver(DRIVER_INTEL);
Chris Wilson5b675f72016-01-22 17:33:40 +00001456 intel_detect_and_clear_missed_interrupts(fd);
Chris Wilson6c428a62014-08-29 13:11:37 +01001457 devid = intel_get_drm_devid(fd);
1458 gen = intel_gen(devid);
Chris Wilson59c55622014-08-29 13:11:37 +01001459 rendercopy = igt_get_render_copyfunc(devid);
Chris Wilson2d08e9e2015-12-11 09:25:03 +00001460 }
Daniel Vetter43779e32013-08-14 14:50:50 +02001461
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001462 for (c = create; c->name; c++) {
1463 char name[80];
1464
1465 create_func = c->create;
1466
Chris Wilson571b8762016-01-08 11:51:56 +00001467 num_buffers = MIN_BUFFERS;
1468 if (c->require()) {
1469 snprintf(name, sizeof(name), "%s%s", c->name, "tiny");
1470 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
1471 run_modes(name, &access_modes[i], CHECK_RAM);
1472 }
1473
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001474 igt_fixture {
1475 num_buffers = gem_mappable_aperture_size() / (1024 * 1024) / 4;
1476 }
1477
1478 if (c->require()) {
1479 snprintf(name, sizeof(name), "%s%s", c->name, "small");
1480 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001481 run_modes(name, &access_modes[i], CHECK_RAM);
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001482 }
1483
1484 igt_fixture {
1485 num_buffers = gem_mappable_aperture_size() / (1024 * 1024);
1486 }
1487
1488 if (c->require()) {
1489 snprintf(name, sizeof(name), "%s%s", c->name, "thrash");
1490 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001491 run_modes(name, &access_modes[i], CHECK_RAM);
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001492 }
1493
1494 igt_fixture {
1495 num_buffers = gem_aperture_size(fd) / (1024 * 1024);
1496 }
1497
1498 if (c->require()) {
1499 snprintf(name, sizeof(name), "%s%s", c->name, "full");
1500 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
Chris Wilson42291f22016-01-07 11:19:26 +00001501 run_modes(name, &access_modes[i], CHECK_RAM);
1502 }
1503
1504 igt_fixture {
1505 num_buffers = gem_mappable_aperture_size() / (1024 * 1024);
1506 pin_sz = intel_get_avail_ram_mb() - num_buffers;
1507
1508 igt_debug("Pinning %ld MiB\n", pin_sz);
1509 pin_sz *= 1024 * 1024;
1510
1511 if (posix_memalign(&pinned, 4096, pin_sz) ||
1512 mlock(pinned, pin_sz) ||
1513 madvise(pinned, pin_sz, MADV_DONTFORK)) {
1514 free(pinned);
1515 pinned = NULL;
1516 }
1517 igt_require(pinned);
1518 }
1519
1520 if (c->require()) {
1521 snprintf(name, sizeof(name), "%s%s", c->name, "swap");
1522 for (i = 0; i < ARRAY_SIZE(access_modes); i++)
1523 run_modes(name, &access_modes[i], CHECK_RAM | CHECK_SWAP);
1524 }
1525
1526 igt_fixture {
1527 if (pinned) {
1528 munlock(pinned, pin_sz);
1529 free(pinned);
1530 pinned = NULL;
1531 }
Chris Wilson1d6e5d32016-01-03 13:44:17 +00001532 }
Daniel Vetter2dbd9982013-08-14 15:48:54 +02001533 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001534}