blob: dc0761eaf16b81410d9a693e7c0922cc156982f5 [file] [log] [blame]
Daniel Vetter3dba47e2013-08-06 22:27:37 +02001/*
2 * Copyright © 2009,2012,2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 * Daniel Vetter <daniel.vetter@ffwll.ch>
27 *
28 */
29
Chris Wilson77633492015-03-26 08:11:43 +000030/** @file gem_concurrent.c
Daniel Vetter3dba47e2013-08-06 22:27:37 +020031 *
Chris Wilson77633492015-03-26 08:11:43 +000032 * This is a test of pread/pwrite/mmap behavior when writing to active
Daniel Vetter3dba47e2013-08-06 22:27:37 +020033 * buffers.
34 *
35 * Based on gem_gtt_concurrent_blt.
36 */
37
Thomas Wood804e11f2015-08-17 17:57:43 +010038#include "igt.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020039#include <stdlib.h>
40#include <stdio.h>
41#include <string.h>
Daniel Vetter3dba47e2013-08-06 22:27:37 +020042#include <fcntl.h>
43#include <inttypes.h>
44#include <errno.h>
45#include <sys/stat.h>
46#include <sys/time.h>
Chris Wilson99431a42013-08-14 11:03:34 +010047#include <sys/wait.h>
Daniel Vetterf5daeec2014-03-23 13:35:09 +010048
49#include <drm.h>
50
Daniel Vetter3dba47e2013-08-06 22:27:37 +020051#include "intel_bufmgr.h"
Daniel Vetter3dba47e2013-08-06 22:27:37 +020052
Chris Wilson77633492015-03-26 08:11:43 +000053IGT_TEST_DESCRIPTION("Test of pread/pwrite/mmap behavior when writing to active"
Thomas Woodb2ac2642014-11-28 11:02:44 +000054 " buffers.");
55
/* File-scope test state, shared by all subtests (and re-created per child
 * in run_forked()). */
int fd, devid, gen;		/* DRM fd, PCI device id, GPU generation */
struct intel_batchbuffer *batch;	/* batch used for blit/render copies */
int all;	/* presumably enables the full subtest matrix — confirm against main() */
Chris Wilson6c428a62014-08-29 13:11:37 +010059
Daniel Vetter3dba47e2013-08-06 22:27:37 +020060static void
Chris Wilsonf2a045f2015-01-02 16:33:33 +053061nop_release_bo(drm_intel_bo *bo)
62{
63 drm_intel_bo_unreference(bo);
64}
65
66static void
Daniel Vetter43779e32013-08-14 14:50:50 +020067prw_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +020068{
Chris Wilson0b4c33f2014-01-26 14:36:32 +000069 int size = width * height, i;
70 uint32_t *tmp;
Daniel Vetter3dba47e2013-08-06 22:27:37 +020071
Chris Wilson0b4c33f2014-01-26 14:36:32 +000072 tmp = malloc(4*size);
73 if (tmp) {
74 for (i = 0; i < size; i++)
75 tmp[i] = val;
76 drm_intel_bo_subdata(bo, 0, 4*size, tmp);
77 free(tmp);
78 } else {
79 for (i = 0; i < size; i++)
80 drm_intel_bo_subdata(bo, 4*i, 4, &val);
81 }
Daniel Vetter3dba47e2013-08-06 22:27:37 +020082}
83
84static void
Chris Wilsonc12f2922014-08-31 16:14:40 +010085prw_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter3dba47e2013-08-06 22:27:37 +020086{
Chris Wilson0b4c33f2014-01-26 14:36:32 +000087 int size = width * height, i;
Chris Wilsonc12f2922014-08-31 16:14:40 +010088 uint32_t *vaddr;
Daniel Vetter3dba47e2013-08-06 22:27:37 +020089
Chris Wilsonc12f2922014-08-31 16:14:40 +010090 do_or_die(drm_intel_bo_map(tmp, true));
91 do_or_die(drm_intel_bo_get_subdata(bo, 0, 4*size, tmp->virtual));
92 vaddr = tmp->virtual;
93 for (i = 0; i < size; i++)
94 igt_assert_eq_u32(vaddr[i], val);
95 drm_intel_bo_unmap(tmp);
Daniel Vetter3dba47e2013-08-06 22:27:37 +020096}
97
98static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +010099unmapped_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200100{
101 drm_intel_bo *bo;
102
103 bo = drm_intel_bo_alloc(bufmgr, "bo", 4*width*height, 0);
Daniel Vetter83440952013-08-13 12:35:58 +0200104 igt_assert(bo);
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200105
106 return bo;
107}
108
Daniel Vetter43779e32013-08-14 14:50:50 +0200109static void
110gtt_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
Daniel Vetter3dba47e2013-08-06 22:27:37 +0200111{
Chris Wilson3e766b82014-09-26 07:55:49 +0100112 uint32_t *vaddr = bo->virtual;
Daniel Vetter43779e32013-08-14 14:50:50 +0200113 int size = width * height;
Daniel Vetter43779e32013-08-14 14:50:50 +0200114
115 drm_intel_gem_bo_start_gtt_access(bo, true);
Daniel Vetter43779e32013-08-14 14:50:50 +0200116 while (size--)
117 *vaddr++ = val;
118}
119
120static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100121gtt_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter43779e32013-08-14 14:50:50 +0200122{
Chris Wilson3e766b82014-09-26 07:55:49 +0100123 uint32_t *vaddr = bo->virtual;
124 int y;
Daniel Vetter43779e32013-08-14 14:50:50 +0200125
Chris Wilson3e766b82014-09-26 07:55:49 +0100126 /* GTT access is slow. So we just compare a few points */
Daniel Vetter43779e32013-08-14 14:50:50 +0200127 drm_intel_gem_bo_start_gtt_access(bo, false);
Chris Wilson3e766b82014-09-26 07:55:49 +0100128 for (y = 0; y < height; y++)
129 igt_assert_eq_u32(vaddr[y*width+y], val);
Daniel Vetter43779e32013-08-14 14:50:50 +0200130}
131
132static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +0100133map_bo(drm_intel_bo *bo)
Daniel Vetter43779e32013-08-14 14:50:50 +0200134{
Daniel Vetter43779e32013-08-14 14:50:50 +0200135 /* gtt map doesn't have a write parameter, so just keep the mapping
136 * around (to avoid the set_domain with the gtt write domain set) and
137 * manually tell the kernel when we start access the gtt. */
138 do_or_die(drm_intel_gem_bo_map_gtt(bo));
139
140 return bo;
141}
142
Chris Wilson86055df2014-08-29 17:36:29 +0100143static drm_intel_bo *
144tile_bo(drm_intel_bo *bo, int width)
145{
146 uint32_t tiling = I915_TILING_X;
147 uint32_t stride = width * 4;
148
149 do_or_die(drm_intel_bo_set_tiling(bo, &tiling, stride));
150
151 return bo;
152}
153
154static drm_intel_bo *
155gtt_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
156{
157 return map_bo(unmapped_create_bo(bufmgr, width, height));
158}
159
160static drm_intel_bo *
161gttX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
162{
163 return tile_bo(gtt_create_bo(bufmgr, width, height), width);
164}
165
166static drm_intel_bo *
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530167wc_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
168{
169 drm_intel_bo *bo;
170
Daniel Vettera3e34ce2015-02-06 11:05:28 +0100171 gem_require_mmap_wc(fd);
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530172
173 bo = unmapped_create_bo(bufmgr, width, height);
174 bo->virtual = gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
175 return bo;
176}
177
178static void
179wc_release_bo(drm_intel_bo *bo)
180{
181 munmap(bo->virtual, bo->size);
182 bo->virtual = NULL;
183
184 nop_release_bo(bo);
185}
186
187static drm_intel_bo *
Chris Wilson86055df2014-08-29 17:36:29 +0100188gpu_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
189{
190 return unmapped_create_bo(bufmgr, width, height);
191}
192
193
194static drm_intel_bo *
195gpuX_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
196{
197 return tile_bo(gpu_create_bo(bufmgr, width, height), width);
198}
199
Daniel Vetter43779e32013-08-14 14:50:50 +0200200static void
201cpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
202{
203 int size = width * height;
204 uint32_t *vaddr;
205
206 do_or_die(drm_intel_bo_map(bo, true));
207 vaddr = bo->virtual;
208 while (size--)
209 *vaddr++ = val;
210 drm_intel_bo_unmap(bo);
211}
212
213static void
Chris Wilsonc12f2922014-08-31 16:14:40 +0100214cpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
Daniel Vetter43779e32013-08-14 14:50:50 +0200215{
216 int size = width * height;
217 uint32_t *vaddr;
218
219 do_or_die(drm_intel_bo_map(bo, false));
220 vaddr = bo->virtual;
221 while (size--)
Chris Wilson6c428a62014-08-29 13:11:37 +0100222 igt_assert_eq_u32(*vaddr++, val);
Daniel Vetter43779e32013-08-14 14:50:50 +0200223 drm_intel_bo_unmap(bo);
224}
225
/*
 * Fill @bo with @val on the GPU using a hand-rolled XY_COLOR_BLT batch
 * submitted through raw execbuffer2 ioctls (a throwaway batch bo is
 * created, written with pwrite and executed, bypassing the shared
 * intel_batchbuffer).
 */
static void
gpu_set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	struct drm_i915_gem_relocation_entry reloc[1];
	struct drm_i915_gem_exec_object2 gem_exec[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_pwrite gem_pwrite;
	struct drm_i915_gem_create create;
	uint32_t buf[10], *b;
	uint32_t tiling, swizzle;

	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);

	memset(reloc, 0, sizeof(reloc));
	memset(gem_exec, 0, sizeof(gem_exec));
	memset(&execbuf, 0, sizeof(execbuf));

	/* Assemble the XY_COLOR_BLT command; gen8+ needs one extra length
	 * dword for the 64-bit destination address. */
	b = buf;
	*b++ = XY_COLOR_BLT_CMD_NOLEN |
		((gen >= 8) ? 5 : 4) |
		COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB;
	if (gen >= 4 && tiling) {
		/* Tiled destination: pitch is given in dwords, not bytes. */
		b[-1] |= XY_COLOR_BLT_TILED;
		*b = width;
	} else
		*b = width << 2;
	/* 32bpp, ROP 0xf0 (pattern copy) merged into the pitch dword. */
	*b++ |= 0xf0 << 16 | 1 << 25 | 1 << 24;
	*b++ = 0;			/* top-left (x1, y1) */
	*b++ = height << 16 | width;	/* bottom-right (x2, y2) */
	/* Relocation for the destination address, written by the kernel. */
	reloc[0].offset = (b - buf) * sizeof(uint32_t);
	reloc[0].target_handle = bo->handle;
	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
	*b++ = 0;
	if (gen >= 8)
		*b++ = 0;		/* upper 32 bits of the address */
	*b++ = val;			/* solid fill colour */
	*b++ = MI_BATCH_BUFFER_END;
	if ((b - buf) & 1)
		*b++ = 0;		/* pad batch length to a qword */

	gem_exec[0].handle = bo->handle;
	gem_exec[0].flags = EXEC_OBJECT_NEEDS_FENCE;

	/* Throwaway 4k batch bo.
	 * NOTE(review): create is not memset, only handle/size are set —
	 * presumably the remaining field(s) are ignored by this ioctl;
	 * confirm against the i915 uAPI. */
	create.handle = 0;
	create.size = 4096;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	gem_exec[1].handle = create.handle;
	gem_exec[1].relocation_count = 1;
	gem_exec[1].relocs_ptr = (uintptr_t)reloc;

	execbuf.buffers_ptr = (uintptr_t)gem_exec;
	execbuf.buffer_count = 2;
	execbuf.batch_len = (b - buf) * sizeof(buf[0]);
	if (gen >= 6)
		execbuf.flags = I915_EXEC_BLT;	/* separate blitter ring on gen6+ */

	/* Upload the assembled commands into the batch bo, then execute. */
	gem_pwrite.handle = gem_exec[1].handle;
	gem_pwrite.offset = 0;
	gem_pwrite.size = execbuf.batch_len;
	gem_pwrite.data_ptr = (uintptr_t)buf;
	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite));
	do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf));

	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &create.handle);
}
292
/*
 * Compare @bo against @val by first blitting it into the linear scratch
 * buffer @tmp, then checking @tmp on the CPU.
 */
static void
gpu_cmp_bo(drm_intel_bo *bo, uint32_t val, int width, int height, drm_intel_bo *tmp)
{
	intel_blt_copy(batch,
		       bo, 0, 0, 4*width,
		       tmp, 0, 0, 4*width,
		       width, height, 32);
	cpu_cmp_bo(tmp, val, width, height, NULL);
}
302
/*
 * Table of buffer access methods under test. Each entry supplies a way to
 * create, fill, compare and release a bo; the set/cmp pair is what races
 * against the in-flight GPU copies in the subtests.
 */
const struct access_mode {
	const char *name;
	void (*set_bo)(drm_intel_bo *bo, uint32_t val, int w, int h);
	void (*cmp_bo)(drm_intel_bo *bo, uint32_t val, int w, int h, drm_intel_bo *tmp);
	drm_intel_bo *(*create_bo)(drm_intel_bufmgr *bufmgr, int width, int height);
	void (*release_bo)(drm_intel_bo *bo);
} access_modes[] = {
	{
		/* pread/pwrite ioctls */
		.name = "prw",
		.set_bo = prw_set_bo,
		.cmp_bo = prw_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* cached CPU mmap */
		.name = "cpu",
		.set_bo = cpu_set_bo,
		.cmp_bo = cpu_cmp_bo,
		.create_bo = unmapped_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* GTT mmap, linear */
		.name = "gtt",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gtt_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* GTT mmap, X-tiled */
		.name = "gttX",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = gttX_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* write-combining CPU mmap (reuses the gtt helpers) */
		.name = "wc",
		.set_bo = gtt_set_bo,
		.cmp_bo = gtt_cmp_bo,
		.create_bo = wc_create_bo,
		.release_bo = wc_release_bo,
	},
	{
		/* GPU-only access via the blitter, linear */
		.name = "gpu",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpu_create_bo,
		.release_bo = nop_release_bo,
	},
	{
		/* GPU-only access via the blitter, X-tiled */
		.name = "gpuX",
		.set_bo = gpu_set_bo,
		.cmp_bo = gpu_cmp_bo,
		.create_bo = gpuX_create_bo,
		.release_bo = nop_release_bo,
	},
};
360
/* Sizing of the buffer working set (num_buffers is scaled in run_forked()). */
#define MAX_NUM_BUFFERS 1024
int num_buffers = MAX_NUM_BUFFERS;
const int width = 512, height = 512;	/* per-buffer dimensions, 32bpp */
igt_render_copyfunc_t rendercopy;	/* per-gen render copy, NULL if unsupported */
365
/*
 * Working set for one access mode: paired src/dst arrays plus a dummy
 * (scratch for cmp_bo) and a spare (extra copy target) buffer. count == 0
 * means no buffers are currently allocated.
 */
struct buffers {
	const struct access_mode *mode;
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo *src[MAX_NUM_BUFFERS], *dst[MAX_NUM_BUFFERS];
	drm_intel_bo *dummy, *spare;
	int count;
};
373
374static void *buffers_init(struct buffers *data,
375 const struct access_mode *mode,
376 int _fd)
377{
378 data->mode = mode;
379 data->count = 0;
380
381 data->bufmgr = drm_intel_bufmgr_gem_init(_fd, 4096);
382 igt_assert(data->bufmgr);
383
384 drm_intel_bufmgr_gem_enable_reuse(data->bufmgr);
385 return intel_batchbuffer_alloc(data->bufmgr, devid);
386}
387
388static void buffers_destroy(struct buffers *data)
389{
390 if (data->count == 0)
391 return;
392
393 for (int i = 0; i < data->count; i++) {
394 data->mode->release_bo(data->src[i]);
395 data->mode->release_bo(data->dst[i]);
396 }
397 data->mode->release_bo(data->dummy);
Chris Wilson35b0ac92015-03-16 11:55:46 +0000398 data->mode->release_bo(data->spare);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000399 data->count = 0;
400}
401
402static void buffers_create(struct buffers *data,
403 int count)
404{
405 igt_assert(data->bufmgr);
406
407 buffers_destroy(data);
408
409 for (int i = 0; i < count; i++) {
410 data->src[i] =
411 data->mode->create_bo(data->bufmgr, width, height);
412 data->dst[i] =
413 data->mode->create_bo(data->bufmgr, width, height);
414 }
415 data->dummy = data->mode->create_bo(data->bufmgr, width, height);
Chris Wilson35b0ac92015-03-16 11:55:46 +0000416 data->spare = data->mode->create_bo(data->bufmgr, width, height);
Chris Wilson99b5ee82015-01-22 10:03:45 +0000417 data->count = count;
418}
419
420static void buffers_fini(struct buffers *data)
421{
422 if (data->bufmgr == NULL)
423 return;
424
425 buffers_destroy(data);
426
427 intel_batchbuffer_free(batch);
428 drm_intel_bufmgr_destroy(data->bufmgr);
429 data->bufmgr = NULL;
430}
431
/* Copy one bo into another using some engine/path. */
typedef void (*do_copy)(drm_intel_bo *dst, drm_intel_bo *src);
/* Optionally inject a GPU hang; the returned handle is cleaned up with
 * igt_post_hang_ring(). */
typedef struct igt_hang_ring (*do_hang)(void);
Chris Wilson59c55622014-08-29 13:11:37 +0100434
435static void render_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
436{
437 struct igt_buf d = {
438 .bo = dst,
439 .size = width * height * 4,
Chris Wilson59c55622014-08-29 13:11:37 +0100440 .num_tiles = width * height * 4,
441 .stride = width * 4,
442 }, s = {
443 .bo = src,
444 .size = width * height * 4,
Chris Wilson59c55622014-08-29 13:11:37 +0100445 .num_tiles = width * height * 4,
446 .stride = width * 4,
447 };
Chris Wilson86055df2014-08-29 17:36:29 +0100448 uint32_t swizzle;
449
450 drm_intel_bo_get_tiling(dst, &d.tiling, &swizzle);
451 drm_intel_bo_get_tiling(src, &s.tiling, &swizzle);
452
Chris Wilson59c55622014-08-29 13:11:37 +0100453 rendercopy(batch, NULL,
454 &s, 0, 0,
455 width, height,
456 &d, 0, 0);
457}
458
/* Copy src into dst with the blitter (32bpp, stride = 4*width). */
static void blt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
{
	intel_blt_copy(batch,
		       src, 0, 0, 4*width,
		       dst, 0, 0, 4*width,
		       width, height, 32);
}
Daniel Vetter5a598c92013-08-14 15:08:05 +0200466
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530467static void cpu_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
468{
469 const int size = width * height * sizeof(uint32_t);
470 void *d, *s;
471
472 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
473 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
474 s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
475 igt_assert(s != NULL);
476 d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
477 igt_assert(d != NULL);
478
479 memcpy(d, s, size);
480
481 munmap(d, size);
482 munmap(s, size);
483}
484
485static void gtt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
486{
487 const int size = width * height * sizeof(uint32_t);
488 void *d, *s;
489
490 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
491 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
492
493 s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
494 igt_assert(s != NULL);
495 d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
496 igt_assert(d != NULL);
497
498 memcpy(d, s, size);
499
500 munmap(d, size);
501 munmap(s, size);
502}
503
504static void wc_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
505{
506 const int size = width * height * sizeof(uint32_t);
507 void *d, *s;
508
509 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
510 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
511
512 s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
513 igt_assert(s != NULL);
514 d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
515 igt_assert(d != NULL);
516
517 memcpy(d, s, size);
518
519 munmap(d, size);
520 munmap(s, size);
521}
522
/* do_hang implementation that injects nothing. */
static struct igt_hang_ring no_hang(void)
{
	return (struct igt_hang_ring){0, 0};
}

/* Inject a hang on the blitter ring. */
static struct igt_hang_ring bcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_BLT);
}

/* Inject a hang on the render ring. */
static struct igt_hang_ring rcs_hang(void)
{
	return igt_hang_ring(fd, I915_EXEC_RENDER);
}

/* Skip hang subtests when hang injection is unavailable. */
static void hang_require(void)
{
	igt_require_hang_ring(fd, -1);
}
542
/*
 * Queue copies of every src into its dst, then (after an optional GPU hang)
 * overwrite all sources from the CPU and verify the destinations still hold
 * the originally copied values — i.e. the queued copies must not observe the
 * late CPU writes. The overwrite loop runs in reverse deliberately.
 */
static void do_overwrite_source(struct buffers *buffers,
				do_copy do_copy_func,
				do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < buffers->count; i++)
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
564
/*
 * As do_overwrite_source(), but each source is read twice: once by the
 * mode's copy function and once by a fixed engine (@do_rcs selects render
 * vs blitter), so the source is busy on multiple rings when overwritten.
 * Only half the buffer count is used since each source gets two dsts.
 */
static void do_overwrite_source_read(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func,
				     int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < half; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
		buffers->mode->set_bo(buffers->dst[i+half], ~i, width, height);
	}
	for (i = 0; i < half; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		if (do_rcs)
			render_copy_bo(buffers->dst[i+half], buffers->src[i]);
		else
			blt_copy_bo(buffers->dst[i+half], buffers->src[i]);
	}
	hang = do_hang_func();
	for (i = half; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < half; i++) {
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
		buffers->mode->cmp_bo(buffers->dst[i+half], i, width, height, buffers->dummy);
	}
	igt_post_hang_ring(fd, hang);
}
596
/* do_overwrite_source_read() with the second copy on the blitter. */
static void do_overwrite_source_read_bcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 0);
}

/* do_overwrite_source_read() with the second copy on the render engine. */
static void do_overwrite_source_read_rcs(struct buffers *buffers,
					 do_copy do_copy_func,
					 do_hang do_hang_func)
{
	do_overwrite_source_read(buffers, do_copy_func, do_hang_func, 1);
}
610
/*
 * As do_overwrite_source(), but with the iteration directions swapped:
 * sources are overwritten forwards and destinations verified in reverse,
 * to vary which buffers are checked while still busy.
 */
static void do_overwrite_source__rev(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], i, width, height);
		buffers->mode->set_bo(buffers->dst[i], ~i, width, height);
	}
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = 0; i < buffers->count; i++)
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
632
/* Minimal variant of do_overwrite_source() using a single src/dst pair. */
static void do_overwrite_source__one(struct buffers *buffers,
				     do_copy do_copy_func,
				     do_hang do_hang_func)
{
	struct igt_hang_ring hang;

	gem_quiescent_gpu(fd);
	buffers->mode->set_bo(buffers->src[0], 0, width, height);
	buffers->mode->set_bo(buffers->dst[0], ~0, width, height);
	do_copy_func(buffers->dst[0], buffers->src[0]);
	hang = do_hang_func();
	buffers->mode->set_bo(buffers->src[0], 0xdeadbeef, width, height);
	buffers->mode->cmp_bo(buffers->dst[0], 0, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
648
/*
 * Interleave render copies, blitter copies and the mode's copy function
 * over the same buffers, chaining dst[i] -> dst[i+half] -> dst[i] so the
 * rings must synchronise with each other.
 * @do_rcs: 1 = render engine for the fixed copies, 0 = blitter,
 *          -1 = alternate between the two per iteration.
 */
static void do_intermix(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func,
			int do_rcs)
{
	const int half = buffers->count/2;
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = 0; i < buffers->count; i++) {
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef^~i, width, height);
		buffers->mode->set_bo(buffers->dst[i], i, width, height);
	}
	for (i = 0; i < half; i++) {
		if (do_rcs == 1 || (do_rcs == -1 && i & 1))
			render_copy_bo(buffers->dst[i], buffers->src[i]);
		else
			blt_copy_bo(buffers->dst[i], buffers->src[i]);

		do_copy_func(buffers->dst[i+half], buffers->src[i]);

		/* Copy back on the other engine than above (when alternating). */
		if (do_rcs == 1 || (do_rcs == -1 && (i & 1) == 0))
			render_copy_bo(buffers->dst[i], buffers->dst[i+half]);
		else
			blt_copy_bo(buffers->dst[i], buffers->dst[i+half]);

		do_copy_func(buffers->dst[i+half], buffers->src[i+half]);
	}
	hang = do_hang_func();
	/* Every dst must now carry its source's pattern. */
	for (i = 0; i < 2*half; i++)
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef^~i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
683
/* do_intermix() pinned to the render engine. */
static void do_intermix_rcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 1);
}

/* do_intermix() pinned to the blitter. */
static void do_intermix_bcs(struct buffers *buffers,
			    do_copy do_copy_func,
			    do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, 0);
}

/* do_intermix() alternating between render and blitter. */
static void do_intermix_both(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	do_intermix(buffers, do_copy_func, do_hang_func, -1);
}
704
/*
 * Fill all sources, queue the copies and immediately (possibly while the
 * copies are still in flight, after an optional hang) read the
 * destinations back — they must already show the copied value.
 */
static void do_early_read(struct buffers *buffers,
			  do_copy do_copy_func,
			  do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef, width, height);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
722
/*
 * Each source is read concurrently by the mode's copy function and by a
 * blitter copy into the shared spare buffer. The spare must end up with
 * the last source's pattern, and every dst with its own source's pattern.
 */
static void do_read_read_bcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		blt_copy_bo(buffers->spare, buffers->src[i]);
	}
	cpu_cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, NULL);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
743
/* As do_read_read_bcs(), but the second reader is the render engine. */
static void do_read_read_rcs(struct buffers *buffers,
			     do_copy do_copy_func,
			     do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xdeadbeef ^ i, width, height);
	for (i = 0; i < buffers->count; i++) {
		do_copy_func(buffers->dst[i], buffers->src[i]);
		render_copy_bo(buffers->spare, buffers->src[i]);
	}
	cpu_cmp_bo(buffers->spare, 0xdeadbeef^(buffers->count-1), width, height, NULL);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xdeadbeef ^ i, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
764
/*
 * Write then read on the GPU: copy each src into its dst, then copy each
 * dst (in reverse order) into the dummy, forcing GPU reads of the freshly
 * written destinations; afterwards the dsts must still hold the pattern.
 */
static void do_gpu_read_after_write(struct buffers *buffers,
				    do_copy do_copy_func,
				    do_hang do_hang_func)
{
	struct igt_hang_ring hang;
	int i;

	gem_quiescent_gpu(fd);
	for (i = buffers->count; i--; )
		buffers->mode->set_bo(buffers->src[i], 0xabcdabcd, width, height);
	for (i = 0; i < buffers->count; i++)
		do_copy_func(buffers->dst[i], buffers->src[i]);
	for (i = buffers->count; i--; )
		do_copy_func(buffers->dummy, buffers->dst[i]);
	hang = do_hang_func();
	for (i = buffers->count; i--; )
		buffers->mode->cmp_bo(buffers->dst[i], 0xabcdabcd, width, height, buffers->dummy);
	igt_post_hang_ring(fd, hang);
}
784
/* A single subtest body, parameterised by copy path and hang injection. */
typedef void (*do_test)(struct buffers *buffers,
			do_copy do_copy_func,
			do_hang do_hang_func);

/* A way of running a do_test: once, in a loop, or in forked children. */
typedef void (*run_wrap)(struct buffers *buffers,
			 do_test do_test_func,
			 do_copy do_copy_func,
			 do_hang do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +0200793
/* run_wrap: execute the test body exactly once. */
static void run_single(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	do_test_func(buffers, do_copy_func, do_hang_func);
}
801
Chris Wilson99b5ee82015-01-22 10:03:45 +0000802static void run_interruptible(struct buffers *buffers,
Chris Wilson59c55622014-08-29 13:11:37 +0100803 do_test do_test_func,
Chris Wilson16bafdf2014-09-04 09:26:24 +0100804 do_copy do_copy_func,
805 do_hang do_hang_func)
Daniel Vetterec283d62013-08-14 15:18:37 +0200806{
807 int loop;
808
809 for (loop = 0; loop < 10; loop++)
Chris Wilson99b5ee82015-01-22 10:03:45 +0000810 do_test_func(buffers, do_copy_func, do_hang_func);
Daniel Vetterec283d62013-08-14 15:18:37 +0200811}
812
/*
 * run_wrap: run the test body in 16 forked children, each with its own DRM
 * fd, bufmgr, batch and a (scaled-down) private buffer set. Failures are
 * propagated by igt_waitchildren(); num_buffers is restored afterwards.
 */
static void run_forked(struct buffers *buffers,
		       do_test do_test_func,
		       do_copy do_copy_func,
		       do_hang do_hang_func)
{
	const int old_num_buffers = num_buffers;

	/* Shrink the per-child working set so 16 children fit. */
	num_buffers /= 16;
	num_buffers += 2;

	igt_fork(child, 16) {
		/* recreate process local variables: the parent's fd/bufmgr/
		 * buffers must not be shared across the fork. */
		buffers->count = 0;
		fd = drm_open_any();

		batch = buffers_init(buffers, buffers->mode, fd);

		buffers_create(buffers, num_buffers);
		for (int loop = 0; loop < 10; loop++)
			do_test_func(buffers, do_copy_func, do_hang_func);

		buffers_fini(buffers);
	}

	igt_waitchildren();

	num_buffers = old_num_buffers;
}
Daniel Vetter5a598c92013-08-14 15:08:05 +0200841
Chris Wilsonf2a045f2015-01-02 16:33:33 +0530842static void bit17_require(void)
843{
844 struct drm_i915_gem_get_tiling2 {
845 uint32_t handle;
846 uint32_t tiling_mode;
847 uint32_t swizzle_mode;
848 uint32_t phys_swizzle_mode;
849 } arg;
850#define DRM_IOCTL_I915_GEM_GET_TILING2 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling2)
851
852 memset(&arg, 0, sizeof(arg));
853 arg.handle = gem_create(fd, 4096);
854 gem_set_tiling(fd, arg.handle, I915_TILING_X, 512);
855
856 do_or_die(drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING2, &arg));
857 gem_close(fd, arg.handle);
858 igt_require(arg.phys_swizzle_mode == arg.swizzle_mode);
859}
860
/* Requirements for the cpu-mmap copy pipeline: sane (matching) swizzling. */
static void cpu_require(void)
{
	bit17_require();
}
865
/* Requirements for the gtt-mmap copy pipeline: none. */
static void gtt_require(void)
{
}
869
/* Requirements for the wc-mmap copy pipeline: sane swizzling plus kernel
 * support for write-combining mmaps.
 */
static void wc_require(void)
{
	bit17_require();
	gem_require_mmap_wc(fd);
}
875
/* Requirements for the blitter-copy pipeline: none. */
static void bcs_require(void)
{
}
879
/* Requirements for the render-copy pipeline: a rendercopy implementation
 * must exist for this GPU generation.
 */
static void rcs_require(void)
{
	igt_require(rendercopy);
}
884
/* Placeholder requirement check for the no-hang case: nothing to check. */
static void no_require(void)
{
}
888
/*
 * Enumerate all subtests for one buffer access mode: every copy pipeline
 * (cpu/gtt/wc/blt/render) crossed with every hang-injection variant and
 * each test function, executed through the supplied run wrapper
 * (single / interruptible / forked — the wrapper name is in @suffix).
 *
 * mode:          the buffer access mode under test
 * suffix:        appended to each subtest name, e.g. "-forked"
 * run_wrap_func: invokes a test function once, repeatedly, or in children
 */
static void
run_basic_modes(const struct access_mode *mode,
		const char *suffix,
		run_wrap run_wrap_func)
{
	const struct {
		const char *prefix;		/* subtest-name component */
		do_copy copy;			/* copy routine under test */
		void (*require)(void);		/* skip check for this pipeline */
	} pipelines[] = {
		{ "cpu", cpu_copy_bo, cpu_require },
		{ "gtt", gtt_copy_bo, gtt_require },
		{ "wc", wc_copy_bo, wc_require },
		{ "blt", blt_copy_bo, bcs_require },
		{ "render", render_copy_bo, rcs_require },
		{ NULL, NULL }
	}, *pskip = pipelines + 3, *p;	/* pskip: skip cpu/gtt/wc unless "all" */
	const struct {
		const char *suffix;		/* subtest-name component */
		do_hang hang;			/* hang-injection routine */
		void (*require)(void);		/* skip check for hang support */
	} hangs[] = {
		{ "", no_hang, no_require },
		{ "-hang-blt", bcs_hang, hang_require },
		{ "-hang-render", rcs_hang, hang_require },
		{ NULL, NULL },
	}, *h;
	struct buffers buffers;

	for (h = hangs; h->suffix; h++) {
		/* hang injection only runs in the full ("all") test binary */
		if (!all && *h->suffix)
			continue;

		for (p = all ? pipelines : pskip; p->prefix; p++) {
			igt_fixture {
				batch = buffers_init(&buffers, mode, fd);
			}

			/* try to overwrite the source values */
			igt_subtest_f("%s-%s-overwrite-source-one%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__one,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-overwrite-source%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-overwrite-source-read-bcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_bcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-overwrite-source-read-rcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source_read_rcs,
					      p->copy, h->hang);
			}

			igt_subtest_f("%s-%s-overwrite-source-rev%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_overwrite_source__rev,
					      p->copy, h->hang);
			}

			/* try to intermix copies with GPU copies */
			igt_subtest_f("%s-%s-intermix-rcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_rcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-intermix-bcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-intermix-both%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_intermix_both,
					      p->copy, h->hang);
			}

			/* try to read the results before the copy completes */
			igt_subtest_f("%s-%s-early-read%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_early_read,
					      p->copy, h->hang);
			}

			/* concurrent reads */
			igt_subtest_f("%s-%s-read-read-bcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_bcs,
					      p->copy, h->hang);
			}
			igt_subtest_f("%s-%s-read-read-rcs%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				igt_require(rendercopy);
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_read_read_rcs,
					      p->copy, h->hang);
			}

			/* and finally try to trick the kernel into losing the pending write */
			igt_subtest_f("%s-%s-gpu-read-after-write%s%s", mode->name, p->prefix, suffix, h->suffix) {
				h->require();
				p->require();
				buffers_create(&buffers, num_buffers);
				run_wrap_func(&buffers,
					      do_gpu_read_after_write,
					      p->copy, h->hang);
			}

			igt_fixture {
				buffers_fini(&buffers);
			}
		}
	}
}
Daniel Vetter43779e32013-08-14 14:50:50 +02001048
/* Run the subtest matrix for one access mode through each wrapper.
 * The single and interruptible variants only exist in the full ("all")
 * binary; the forked variant always runs, under signal bombardment.
 */
static void
run_modes(const struct access_mode *mode)
{
	if (all) {
		run_basic_modes(mode, "", run_single);

		igt_fork_signal_helper();
		run_basic_modes(mode, "-interruptible", run_interruptible);
		igt_stop_signal_helper();
	}

	igt_fork_signal_helper();
	run_basic_modes(mode, "-forked", run_forked);
	igt_stop_signal_helper();
}
1064
igt_main
{
	int max, i;

	igt_skip_on_simulation();

	/* The full subtest matrix is only enabled when the binary is
	 * invoked under a name containing "all".
	 */
	if (strstr(igt_test_name(), "all"))
		all = true;

	igt_fixture {
		fd = drm_open_any();
		devid = intel_get_drm_devid(fd);
		gen = intel_gen(devid);
		rendercopy = igt_get_render_copyfunc(devid);

		/* Cap the buffer count at half the aperture (in MiB)... */
		max = gem_aperture_size (fd) / (1024 * 1024) / 2;
		if (num_buffers > max)
			num_buffers = max;

		/* ...and at 3/4 of total RAM (in MiB)... */
		max = intel_get_total_ram_mb() * 3 / 4;
		if (num_buffers > max)
			num_buffers = max;
		/* ...then halve it, as the tests use two sets of buffers. */
		num_buffers /= 2;
		igt_info("using 2x%d buffers, each 1MiB\n", num_buffers);
	}

	for (i = 0; i < ARRAY_SIZE(access_modes); i++)
		run_modes(&access_modes[i]);
}