/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23
Chris Wilson976ed7c2017-08-24 12:16:17 +010024#include "config.h"
25
Chris Wilson721d8742016-10-27 11:32:47 +010026#include <sys/poll.h>
Chris Wilsona19ef052017-07-31 14:15:59 +010027#include <sys/ioctl.h>
Chris Wilson976ed7c2017-08-24 12:16:17 +010028#include <sched.h>
Chris Wilson721d8742016-10-27 11:32:47 +010029
30#include "igt.h"
31#include "igt_vgem.h"
Chris Wilson61f8de72017-07-20 10:08:28 +010032#include "igt_rand.h"
Chris Wilson976ed7c2017-08-24 12:16:17 +010033#include "igt_sysfs.h"
Chris Wilson721d8742016-10-27 11:32:47 +010034
Chris Wilson765f7b02017-09-25 21:45:21 +010035#define BIT(x) (1ul << (x))
36
Chris Wilson49f44c72016-11-14 21:24:52 +000037#define LOCAL_PARAM_HAS_SCHEDULER 41
Chris Wilson765f7b02017-09-25 21:45:21 +010038#define HAS_SCHEDULER BIT(0)
39#define HAS_PRIORITY BIT(1)
40#define HAS_PREEMPTION BIT(2)
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000041#define LOCAL_CONTEXT_PARAM_PRIORITY 6
Chris Wilson721d8742016-10-27 11:32:47 +010042
43#define LO 0
44#define HI 1
45#define NOISE 2
46
47#define MAX_PRIO 1023
Chris Wilsonda553ff2017-09-22 10:32:44 +010048#define MIN_PRIO -1023
Chris Wilson721d8742016-10-27 11:32:47 +010049
50#define BUSY_QLEN 8
51
52IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
53
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000054static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
Chris Wilson721d8742016-10-27 11:32:47 +010055{
56 struct local_i915_gem_context_param param;
57
58 memset(&param, 0, sizeof(param));
59 param.context = ctx;
60 param.size = 0;
61 param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
62 param.value = prio;
63
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000064 return __gem_context_set_param(fd, &param);
65}
66
67static void ctx_set_priority(int fd, uint32_t ctx, int prio)
68{
69 igt_assert_eq(__ctx_set_priority(fd, ctx, prio), 0);
70}
71
72static void ctx_has_priority(int fd)
73{
74 igt_require(__ctx_set_priority(fd, 0, MAX_PRIO) == 0);
Chris Wilson721d8742016-10-27 11:32:47 +010075}
76
77static void store_dword(int fd, uint32_t ctx, unsigned ring,
78 uint32_t target, uint32_t offset, uint32_t value,
79 uint32_t cork, unsigned write_domain)
80{
81 const int gen = intel_gen(intel_get_drm_devid(fd));
82 struct drm_i915_gem_exec_object2 obj[3];
83 struct drm_i915_gem_relocation_entry reloc;
84 struct drm_i915_gem_execbuffer2 execbuf;
85 uint32_t batch[16];
86 int i;
87
88 memset(&execbuf, 0, sizeof(execbuf));
Chris Wilson4de67b22017-01-02 11:05:21 +000089 execbuf.buffers_ptr = to_user_pointer(obj + !cork);
Chris Wilson721d8742016-10-27 11:32:47 +010090 execbuf.buffer_count = 2 + !!cork;
91 execbuf.flags = ring;
92 if (gen < 6)
93 execbuf.flags |= I915_EXEC_SECURE;
94 execbuf.rsvd1 = ctx;
95
96 memset(obj, 0, sizeof(obj));
97 obj[0].handle = cork;
98 obj[1].handle = target;
99 obj[2].handle = gem_create(fd, 4096);
100
101 memset(&reloc, 0, sizeof(reloc));
102 reloc.target_handle = obj[1].handle;
103 reloc.presumed_offset = 0;
104 reloc.offset = sizeof(uint32_t);
105 reloc.delta = offset;
106 reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
107 reloc.write_domain = write_domain;
Chris Wilson4de67b22017-01-02 11:05:21 +0000108 obj[2].relocs_ptr = to_user_pointer(&reloc);
Chris Wilson721d8742016-10-27 11:32:47 +0100109 obj[2].relocation_count = 1;
110
111 i = 0;
112 batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
113 if (gen >= 8) {
114 batch[++i] = offset;
115 batch[++i] = 0;
116 } else if (gen >= 4) {
117 batch[++i] = 0;
118 batch[++i] = offset;
119 reloc.offset += sizeof(uint32_t);
120 } else {
121 batch[i]--;
122 batch[++i] = offset;
123 }
124 batch[++i] = value;
125 batch[++i] = MI_BATCH_BUFFER_END;
126 gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
127 gem_execbuf(fd, &execbuf);
128 gem_close(fd, obj[2].handle);
129}
130
Chris Wilson721d8742016-10-27 11:32:47 +0100131struct cork {
132 int device;
133 uint32_t handle;
134 uint32_t fence;
135};
136
137static void plug(int fd, struct cork *c)
138{
139 struct vgem_bo bo;
140 int dmabuf;
141
142 c->device = drm_open_driver(DRIVER_VGEM);
143
144 bo.width = bo.height = 1;
145 bo.bpp = 4;
146 vgem_create(c->device, &bo);
147 c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);
148
149 dmabuf = prime_handle_to_fd(c->device, bo.handle);
150 c->handle = prime_fd_to_handle(fd, dmabuf);
151 close(dmabuf);
152}
153
154static void unplug(struct cork *c)
155{
156 vgem_fence_signal(c->device, c->fence);
157 close(c->device);
158}
159
Chris Wilson2885b102017-09-25 20:59:54 +0100160static uint32_t create_highest_priority(int fd)
161{
162 uint32_t ctx = gem_context_create(fd);
163
164 /*
165 * If there is no priority support, all contexts will have equal
166 * priority (and therefore the max user priority), so no context
167 * can overtake us, and we effectively can form a plug.
168 */
169 __ctx_set_priority(fd, ctx, MAX_PRIO);
170
171 return ctx;
172}
173
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100174static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
175{
Chris Wilson2885b102017-09-25 20:59:54 +0100176 igt_spin_t *spin[BUSY_QLEN];
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100177
Chris Wilson2885b102017-09-25 20:59:54 +0100178 for (int n = 0; n < ARRAY_SIZE(spin); n++) {
179 uint32_t ctx = create_highest_priority(fd);
180 spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
181 gem_context_destroy(fd, ctx);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100182 }
183
184 unplug(c); /* batches will now be queued on the engine */
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100185 igt_debugfs_dump(fd, "i915_engine_info");
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100186
Chris Wilson2885b102017-09-25 20:59:54 +0100187 for (int n = 0; n < ARRAY_SIZE(spin); n++)
188 igt_spin_batch_free(fd, spin[n]);
189
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100190}
191
Chris Wilson721d8742016-10-27 11:32:47 +0100192static void fifo(int fd, unsigned ring)
193{
194 struct cork cork;
Chris Wilson721d8742016-10-27 11:32:47 +0100195 uint32_t scratch;
196 uint32_t *ptr;
197
198 scratch = gem_create(fd, 4096);
199
Chris Wilson721d8742016-10-27 11:32:47 +0100200 plug(fd, &cork);
201
202 /* Same priority, same timeline, final result will be the second eb */
203 store_dword(fd, 0, ring, scratch, 0, 1, cork.handle, 0);
204 store_dword(fd, 0, ring, scratch, 0, 2, cork.handle, 0);
205
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100206 unplug_show_queue(fd, &cork, ring);
Chris Wilson721d8742016-10-27 11:32:47 +0100207
208 ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
209 gem_set_domain(fd, scratch, /* no write hazard lies! */
210 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
211 gem_close(fd, scratch);
212
213 igt_assert_eq_u32(ptr[0], 2);
214 munmap(ptr, 4096);
215}
216
217static void reorder(int fd, unsigned ring, unsigned flags)
218#define EQUAL 1
219{
220 struct cork cork;
221 uint32_t scratch;
Chris Wilson721d8742016-10-27 11:32:47 +0100222 uint32_t *ptr;
223 uint32_t ctx[2];
224
225 ctx[LO] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100226 ctx_set_priority(fd, ctx[LO], MIN_PRIO);
Chris Wilson721d8742016-10-27 11:32:47 +0100227
228 ctx[HI] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100229 ctx_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);
Chris Wilson721d8742016-10-27 11:32:47 +0100230
231 scratch = gem_create(fd, 4096);
Chris Wilson721d8742016-10-27 11:32:47 +0100232 plug(fd, &cork);
233
234 /* We expect the high priority context to be executed first, and
235 * so the final result will be value from the low priority context.
236 */
237 store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], cork.handle, 0);
238 store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], cork.handle, 0);
239
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100240 unplug_show_queue(fd, &cork, ring);
Chris Wilson721d8742016-10-27 11:32:47 +0100241
242 gem_context_destroy(fd, ctx[LO]);
243 gem_context_destroy(fd, ctx[HI]);
244
245 ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
246 gem_set_domain(fd, scratch, /* no write hazard lies! */
247 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
248 gem_close(fd, scratch);
249
250 if (flags & EQUAL) /* equal priority, result will be fifo */
251 igt_assert_eq_u32(ptr[0], ctx[HI]);
252 else
253 igt_assert_eq_u32(ptr[0], ctx[LO]);
254 munmap(ptr, 4096);
255}
256
257static void promotion(int fd, unsigned ring)
258{
259 struct cork cork;
260 uint32_t result, dep;
Chris Wilson721d8742016-10-27 11:32:47 +0100261 uint32_t *ptr;
262 uint32_t ctx[3];
263
264 ctx[LO] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100265 ctx_set_priority(fd, ctx[LO], MIN_PRIO);
Chris Wilson721d8742016-10-27 11:32:47 +0100266
267 ctx[HI] = gem_context_create(fd);
268 ctx_set_priority(fd, ctx[HI], 0);
269
270 ctx[NOISE] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100271 ctx_set_priority(fd, ctx[NOISE], MIN_PRIO/2);
Chris Wilson721d8742016-10-27 11:32:47 +0100272
273 result = gem_create(fd, 4096);
274 dep = gem_create(fd, 4096);
275
Chris Wilson721d8742016-10-27 11:32:47 +0100276 plug(fd, &cork);
277
278 /* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
279 *
280 * fifo would be NOISE, LO, HI.
281 * strict priority would be HI, NOISE, LO
282 */
283 store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], cork.handle, 0);
284 store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], cork.handle, 0);
285
286 /* link LO <-> HI via a dependency on another buffer */
287 store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
288 store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);
289
290 store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);
291
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100292 unplug_show_queue(fd, &cork, ring);
Chris Wilson721d8742016-10-27 11:32:47 +0100293
294 gem_context_destroy(fd, ctx[NOISE]);
295 gem_context_destroy(fd, ctx[LO]);
296 gem_context_destroy(fd, ctx[HI]);
297
298 ptr = gem_mmap__gtt(fd, dep, 4096, PROT_READ);
299 gem_set_domain(fd, dep, /* no write hazard lies! */
300 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
301 gem_close(fd, dep);
302
303 igt_assert_eq_u32(ptr[0], ctx[HI]);
304 munmap(ptr, 4096);
305
306 ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
307 gem_set_domain(fd, result, /* no write hazard lies! */
308 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
309 gem_close(fd, result);
310
311 igt_assert_eq_u32(ptr[0], ctx[NOISE]);
312 munmap(ptr, 4096);
313}
314
Chris Wilsona3801342017-07-16 16:28:41 +0100315#define NEW_CTX 0x1
316static void preempt(int fd, unsigned ring, unsigned flags)
317{
318 uint32_t result = gem_create(fd, 4096);
319 uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
320 igt_spin_t *spin[16];
321 uint32_t ctx[2];
322
323 ctx[LO] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100324 ctx_set_priority(fd, ctx[LO], MIN_PRIO);
Chris Wilsona3801342017-07-16 16:28:41 +0100325
326 ctx[HI] = gem_context_create(fd);
327 ctx_set_priority(fd, ctx[HI], MAX_PRIO);
328
329 for (int n = 0; n < 16; n++) {
330 if (flags & NEW_CTX) {
331 gem_context_destroy(fd, ctx[LO]);
332 ctx[LO] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100333 ctx_set_priority(fd, ctx[LO], MIN_PRIO);
Chris Wilsona3801342017-07-16 16:28:41 +0100334 }
335 spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
336 igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
337
338 store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
339
340 gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
341 igt_assert_eq_u32(ptr[0], n + 1);
342 igt_assert(gem_bo_busy(fd, spin[0]->handle));
343 }
344
345 for (int n = 0; n < 16; n++)
346 igt_spin_batch_free(fd, spin[n]);
347
348 gem_context_destroy(fd, ctx[LO]);
349 gem_context_destroy(fd, ctx[HI]);
350
351 munmap(ptr, 4096);
352 gem_close(fd, result);
353}
354
355static void preempt_other(int fd, unsigned ring)
356{
357 uint32_t result = gem_create(fd, 4096);
358 uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
359 igt_spin_t *spin[16];
360 unsigned int other;
361 unsigned int n, i;
362 uint32_t ctx[3];
363
364 /* On each engine, insert
365 * [NOISE] spinner,
366 * [LOW] write
367 *
368 * Then on our target engine do a [HIGH] write which should then
369 * prompt its dependent LOW writes in front of the spinner on
370 * each engine. The purpose of this test is to check that preemption
371 * can cross engines.
372 */
373
374 ctx[LO] = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100375 ctx_set_priority(fd, ctx[LO], MIN_PRIO);
Chris Wilsona3801342017-07-16 16:28:41 +0100376
377 ctx[NOISE] = gem_context_create(fd);
378
379 ctx[HI] = gem_context_create(fd);
380 ctx_set_priority(fd, ctx[HI], MAX_PRIO);
381
382 n = 0;
383 for_each_engine(fd, other) {
384 spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
385 store_dword(fd, ctx[LO], other,
386 result, (n + 1)*sizeof(uint32_t), n + 1,
387 0, I915_GEM_DOMAIN_RENDER);
388 n++;
389 }
390 store_dword(fd, ctx[HI], ring,
391 result, (n + 1)*sizeof(uint32_t), n + 1,
392 0, I915_GEM_DOMAIN_RENDER);
393
394 gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
395
396 for (i = 0; i < n; i++) {
397 igt_assert(gem_bo_busy(fd, spin[i]->handle));
398 igt_spin_batch_free(fd, spin[i]);
399 }
400
401 n++;
402 for (i = 0; i <= n; i++)
403 igt_assert_eq_u32(ptr[i], i);
404
405 gem_context_destroy(fd, ctx[LO]);
406 gem_context_destroy(fd, ctx[NOISE]);
407 gem_context_destroy(fd, ctx[HI]);
408
409 munmap(ptr, 4096);
410 gem_close(fd, result);
411}
412
413static void preempt_self(int fd, unsigned ring)
414{
415 uint32_t result = gem_create(fd, 4096);
416 uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
417 igt_spin_t *spin[16];
418 unsigned int other;
419 unsigned int n, i;
420 uint32_t ctx[3];
421
422 /* On each engine, insert
423 * [NOISE] spinner,
424 * [self/LOW] write
425 *
426 * Then on our target engine do a [self/HIGH] write which should then
427 * preempt its own lower priority task on any engine.
428 */
429
430 ctx[NOISE] = gem_context_create(fd);
431
432 ctx[HI] = gem_context_create(fd);
433
434 n = 0;
Chris Wilsonda553ff2017-09-22 10:32:44 +0100435 ctx_set_priority(fd, ctx[HI], MIN_PRIO);
Chris Wilsona3801342017-07-16 16:28:41 +0100436 for_each_engine(fd, other) {
437 spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
438 store_dword(fd, ctx[HI], other,
439 result, (n + 1)*sizeof(uint32_t), n + 1,
440 0, I915_GEM_DOMAIN_RENDER);
441 n++;
442 }
443 ctx_set_priority(fd, ctx[HI], MAX_PRIO);
444 store_dword(fd, ctx[HI], ring,
445 result, (n + 1)*sizeof(uint32_t), n + 1,
446 0, I915_GEM_DOMAIN_RENDER);
447
448 gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
449
450 for (i = 0; i < n; i++) {
451 igt_assert(gem_bo_busy(fd, spin[i]->handle));
452 igt_spin_batch_free(fd, spin[i]);
453 }
454
455 n++;
456 for (i = 0; i <= n; i++)
457 igt_assert_eq_u32(ptr[i], i);
458
459 gem_context_destroy(fd, ctx[NOISE]);
460 gem_context_destroy(fd, ctx[HI]);
461
462 munmap(ptr, 4096);
463 gem_close(fd, result);
464}
465
Chris Wilson721d8742016-10-27 11:32:47 +0100466static void deep(int fd, unsigned ring)
467{
468#define XS 8
Chris Wilsonb9c88302017-09-28 11:09:17 +0100469 const unsigned int nctx = MAX_PRIO - MIN_PRIO;
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100470 const unsigned size = ALIGN(4*nctx, 4096);
Chris Wilsonb9c88302017-09-28 11:09:17 +0100471 struct timespec tv = {};
Chris Wilson721d8742016-10-27 11:32:47 +0100472 struct cork cork;
473 uint32_t result, dep[XS];
Chris Wilsonb9c88302017-09-28 11:09:17 +0100474 uint32_t expected = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100475 uint32_t *ptr;
476 uint32_t *ctx;
477
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100478 ctx = malloc(sizeof(*ctx) * nctx);
479 for (int n = 0; n < nctx; n++) {
Chris Wilson721d8742016-10-27 11:32:47 +0100480 ctx[n] = gem_context_create(fd);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100481 ctx_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
Chris Wilson721d8742016-10-27 11:32:47 +0100482 }
483
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100484 result = gem_create(fd, size);
Chris Wilson721d8742016-10-27 11:32:47 +0100485 for (int m = 0; m < XS; m ++)
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100486 dep[m] = gem_create(fd, size);
Chris Wilson721d8742016-10-27 11:32:47 +0100487
Chris Wilsonb9c88302017-09-28 11:09:17 +0100488 /* Bind all surfaces and contexts before starting the timeout. */
489 {
490 struct drm_i915_gem_exec_object2 obj[XS + 2];
491 struct drm_i915_gem_execbuffer2 execbuf;
492 const uint32_t bbe = MI_BATCH_BUFFER_END;
493
494 memset(obj, 0, sizeof(obj));
495 for (int n = 0; n < XS; n++)
496 obj[n].handle = dep[n];
497 obj[XS].handle = result;
498 obj[XS+1].handle = gem_create(fd, 4096);
499 gem_write(fd, obj[XS+1].handle, 0, &bbe, sizeof(bbe));
500
501 memset(&execbuf, 0, sizeof(execbuf));
502 execbuf.buffers_ptr = to_user_pointer(obj);
503 execbuf.buffer_count = XS + 2;
504 execbuf.flags = ring;
505 for (int n = 0; n < nctx; n++) {
506 execbuf.rsvd1 = ctx[n];
507 gem_execbuf(fd, &execbuf);
508 }
509 gem_close(fd, obj[XS+1].handle);
510 gem_sync(fd, result);
511 }
512
Chris Wilson721d8742016-10-27 11:32:47 +0100513 plug(fd, &cork);
514
515 /* Create a deep dependency chain, with a few branches */
Chris Wilsonb9c88302017-09-28 11:09:17 +0100516 for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
Chris Wilson721d8742016-10-27 11:32:47 +0100517 for (int m = 0; m < XS; m++)
518 store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
519
Chris Wilsonb9c88302017-09-28 11:09:17 +0100520 for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
Chris Wilson721d8742016-10-27 11:32:47 +0100521 for (int m = 0; m < XS; m++) {
522 store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
523 store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
524 }
Chris Wilsonb9c88302017-09-28 11:09:17 +0100525 expected = ctx[n];
Chris Wilson721d8742016-10-27 11:32:47 +0100526 }
527
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100528 unplug_show_queue(fd, &cork, ring);
Chris Wilsonb9c88302017-09-28 11:09:17 +0100529 igt_require(expected); /* too slow */
Chris Wilson721d8742016-10-27 11:32:47 +0100530
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100531 for (int n = 0; n < nctx; n++)
Chris Wilson721d8742016-10-27 11:32:47 +0100532 gem_context_destroy(fd, ctx[n]);
533
534 for (int m = 0; m < XS; m++) {
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100535 ptr = gem_mmap__gtt(fd, dep[m], size, PROT_READ);
Chris Wilson721d8742016-10-27 11:32:47 +0100536 gem_set_domain(fd, dep[m], /* no write hazard lies! */
537 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
538 gem_close(fd, dep[m]);
539
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100540 for (int n = 0; n < nctx; n++)
Chris Wilson721d8742016-10-27 11:32:47 +0100541 igt_assert_eq_u32(ptr[n], ctx[n]);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100542 munmap(ptr, size);
Chris Wilson721d8742016-10-27 11:32:47 +0100543 }
544
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100545 ptr = gem_mmap__gtt(fd, result, size, PROT_READ);
Chris Wilson721d8742016-10-27 11:32:47 +0100546 gem_set_domain(fd, result, /* no write hazard lies! */
547 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
548 gem_close(fd, result);
549
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100550 /* No reordering due to PI on all contexts because of the common dep */
Chris Wilson721d8742016-10-27 11:32:47 +0100551 for (int m = 0; m < XS; m++)
Chris Wilsonb9c88302017-09-28 11:09:17 +0100552 igt_assert_eq_u32(ptr[m], expected);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100553 munmap(ptr, size);
Chris Wilson721d8742016-10-27 11:32:47 +0100554
555 free(ctx);
Chris Wilsonf6920752017-04-24 13:20:04 +0100556#undef XS
557}
558
/* Empty SIGALRM handler: its only job is to make blocking ioctls return
 * -EINTR so the ring-measuring loops below can make forward progress. */
static void alarm_handler(int sig)
{
}
562
563static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
564{
Chris Wilson976ed7c2017-08-24 12:16:17 +0100565 int err = 0;
566 if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
567 err = -errno;
568 return err;
Chris Wilsona19ef052017-07-31 14:15:59 +0100569}
570
571static unsigned int measure_ring_size(int fd, unsigned int ring)
572{
573 struct sigaction sa = { .sa_handler = alarm_handler };
574 struct drm_i915_gem_exec_object2 obj[2];
575 struct drm_i915_gem_execbuffer2 execbuf;
576 const uint32_t bbe = MI_BATCH_BUFFER_END;
577 unsigned int count, last;
578 struct itimerval itv;
579 struct cork c;
580
581 memset(obj, 0, sizeof(obj));
582 obj[1].handle = gem_create(fd, 4096);
583 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
584
585 memset(&execbuf, 0, sizeof(execbuf));
586 execbuf.buffers_ptr = to_user_pointer(obj + 1);
587 execbuf.buffer_count = 1;
588 execbuf.flags = ring;
589 gem_execbuf(fd, &execbuf);
590 gem_sync(fd, obj[1].handle);
591
592 plug(fd, &c);
593 obj[0].handle = c.handle;
594
595 execbuf.buffers_ptr = to_user_pointer(obj);
596 execbuf.buffer_count = 2;
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100597 execbuf.rsvd1 = gem_context_create(fd);
Chris Wilsona19ef052017-07-31 14:15:59 +0100598
599 sigaction(SIGALRM, &sa, NULL);
600 itv.it_interval.tv_sec = 0;
601 itv.it_interval.tv_usec = 100;
602 itv.it_value.tv_sec = 0;
603 itv.it_value.tv_usec = 1000;
604 setitimer(ITIMER_REAL, &itv, NULL);
605
606 last = -1;
607 count = 0;
608 do {
609 if (__execbuf(fd, &execbuf) == 0) {
610 count++;
611 continue;
612 }
613
614 if (last == count)
615 break;
616
617 last = count;
618 } while (1);
619
620 memset(&itv, 0, sizeof(itv));
621 setitimer(ITIMER_REAL, &itv, NULL);
622
623 unplug(&c);
624 gem_close(fd, obj[1].handle);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100625 gem_context_destroy(fd, execbuf.rsvd1);
Chris Wilsona19ef052017-07-31 14:15:59 +0100626
627 return count;
628}
629
Chris Wilsonf6920752017-04-24 13:20:04 +0100630static void wide(int fd, unsigned ring)
631{
Chris Wilsonf6920752017-04-24 13:20:04 +0100632#define NCTX 4096
Chris Wilsona19ef052017-07-31 14:15:59 +0100633 struct timespec tv = {};
634 unsigned int ring_size = measure_ring_size(fd, ring);
Chris Wilsonf6920752017-04-24 13:20:04 +0100635
636 struct cork cork;
637 uint32_t result;
Chris Wilsonf6920752017-04-24 13:20:04 +0100638 uint32_t *ptr;
639 uint32_t *ctx;
Chris Wilsona19ef052017-07-31 14:15:59 +0100640 unsigned int count;
Chris Wilsonf6920752017-04-24 13:20:04 +0100641
642 ctx = malloc(sizeof(*ctx)*NCTX);
643 for (int n = 0; n < NCTX; n++)
644 ctx[n] = gem_context_create(fd);
645
Chris Wilsonf6920752017-04-24 13:20:04 +0100646 result = gem_create(fd, 4*NCTX);
647
Chris Wilsonf6920752017-04-24 13:20:04 +0100648 plug(fd, &cork);
649
650 /* Lots of in-order requests, plugged and submitted simultaneously */
Chris Wilsona19ef052017-07-31 14:15:59 +0100651 for (count = 0;
652 igt_seconds_elapsed(&tv) < 5 && count < ring_size;
653 count++) {
654 for (int n = 0; n < NCTX; n++) {
655 store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
656 }
Chris Wilsonf6920752017-04-24 13:20:04 +0100657 }
Chris Wilsona19ef052017-07-31 14:15:59 +0100658 igt_info("Submitted %d requests over %d contexts in %.1fms\n",
659 count, NCTX, igt_nsec_elapsed(&tv) * 1e-6);
Chris Wilsonf6920752017-04-24 13:20:04 +0100660
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100661 unplug_show_queue(fd, &cork, ring);
Chris Wilsonf6920752017-04-24 13:20:04 +0100662
663 for (int n = 0; n < NCTX; n++)
664 gem_context_destroy(fd, ctx[n]);
665
666 ptr = gem_mmap__gtt(fd, result, 4*NCTX, PROT_READ);
667 gem_set_domain(fd, result, /* no write hazard lies! */
668 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
669 for (int n = 0; n < NCTX; n++)
670 igt_assert_eq_u32(ptr[n], ctx[n]);
Chris Wilson61f8de72017-07-20 10:08:28 +0100671 munmap(ptr, 4*NCTX);
Chris Wilsonf6920752017-04-24 13:20:04 +0100672
Chris Wilsonf6920752017-04-24 13:20:04 +0100673 gem_close(fd, result);
674 free(ctx);
Chris Wilson61f8de72017-07-20 10:08:28 +0100675#undef NCTX
676}
677
678static void reorder_wide(int fd, unsigned ring)
679{
680 const int gen = intel_gen(intel_get_drm_devid(fd));
681 struct drm_i915_gem_relocation_entry reloc;
682 struct drm_i915_gem_exec_object2 obj[3];
683 struct drm_i915_gem_execbuffer2 execbuf;
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100684 struct timespec tv = {};
685 unsigned int ring_size = measure_ring_size(fd, ring);
Chris Wilson61f8de72017-07-20 10:08:28 +0100686 struct cork cork;
687 uint32_t result, target;
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100688 uint32_t *found, *expected;
Chris Wilson61f8de72017-07-20 10:08:28 +0100689
690 result = gem_create(fd, 4096);
691 target = gem_create(fd, 4096);
692
Chris Wilson61f8de72017-07-20 10:08:28 +0100693 plug(fd, &cork);
694
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100695 expected = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
Chris Wilson61f8de72017-07-20 10:08:28 +0100696 gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
697
698 memset(obj, 0, sizeof(obj));
699 obj[0].handle = cork.handle;
700 obj[1].handle = result;
701 obj[2].relocs_ptr = to_user_pointer(&reloc);
702 obj[2].relocation_count = 1;
703
704 memset(&reloc, 0, sizeof(reloc));
705 reloc.target_handle = result;
706 reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
707 reloc.write_domain = 0; /* lies */
708
709 memset(&execbuf, 0, sizeof(execbuf));
710 execbuf.buffers_ptr = to_user_pointer(obj);
711 execbuf.buffer_count = 3;
712 execbuf.flags = ring;
713 if (gen < 6)
714 execbuf.flags |= I915_EXEC_SECURE;
715
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100716 for (int n = MIN_PRIO, x = 1;
717 igt_seconds_elapsed(&tv) < 5 && n <= MAX_PRIO;
718 n++, x++) {
719 unsigned int sz = ALIGN(ring_size * 64, 4096);
Chris Wilson61f8de72017-07-20 10:08:28 +0100720 uint32_t *batch;
721
722 execbuf.rsvd1 = gem_context_create(fd);
723 ctx_set_priority(fd, execbuf.rsvd1, n);
724
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100725 obj[2].handle = gem_create(fd, sz);
726 batch = gem_mmap__gtt(fd, obj[2].handle, sz, PROT_WRITE);
Chris Wilson61f8de72017-07-20 10:08:28 +0100727 gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
728
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100729 for (int m = 0; m < ring_size; m++) {
Chris Wilson61f8de72017-07-20 10:08:28 +0100730 uint64_t addr;
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100731 int idx = hars_petruska_f54_1_random_unsafe_max(1024);
Chris Wilson61f8de72017-07-20 10:08:28 +0100732 int i;
733
734 execbuf.batch_start_offset = m * 64;
735 reloc.offset = execbuf.batch_start_offset + sizeof(uint32_t);
736 reloc.delta = idx * sizeof(uint32_t);
737 addr = reloc.presumed_offset + reloc.delta;
738
739 i = execbuf.batch_start_offset / sizeof(uint32_t);
740 batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
741 if (gen >= 8) {
742 batch[++i] = addr;
743 batch[++i] = addr >> 32;
744 } else if (gen >= 4) {
745 batch[++i] = 0;
746 batch[++i] = addr;
747 reloc.offset += sizeof(uint32_t);
748 } else {
749 batch[i]--;
750 batch[++i] = addr;
751 }
752 batch[++i] = x;
753 batch[++i] = MI_BATCH_BUFFER_END;
754
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100755 if (!expected[idx])
756 expected[idx] = x;
Chris Wilson61f8de72017-07-20 10:08:28 +0100757
758 gem_execbuf(fd, &execbuf);
759 }
760
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100761 munmap(batch, sz);
Chris Wilson61f8de72017-07-20 10:08:28 +0100762 gem_close(fd, obj[2].handle);
763 gem_context_destroy(fd, execbuf.rsvd1);
764 }
765
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100766 unplug_show_queue(fd, &cork, ring);
Chris Wilson61f8de72017-07-20 10:08:28 +0100767
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100768 found = gem_mmap__gtt(fd, result, 4096, PROT_READ);
Chris Wilson61f8de72017-07-20 10:08:28 +0100769 gem_set_domain(fd, result, /* no write hazard lies! */
770 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
771 for (int n = 0; n < 1024; n++)
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100772 igt_assert_eq_u32(found[n], expected[n]);
773 munmap(found, 4096);
774 munmap(expected, 4096);
Chris Wilson61f8de72017-07-20 10:08:28 +0100775
776 gem_close(fd, result);
777 gem_close(fd, target);
Chris Wilson721d8742016-10-27 11:32:47 +0100778}
779
Chris Wilson976ed7c2017-08-24 12:16:17 +0100780static void bind_to_cpu(int cpu)
781{
782 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
783 struct sched_param rt = {.sched_priority = 99 };
784 cpu_set_t allowed;
785
786 igt_assert(sched_setscheduler(getpid(), SCHED_RR | SCHED_RESET_ON_FORK, &rt) == 0);
787
788 CPU_ZERO(&allowed);
789 CPU_SET(cpu % ncpus, &allowed);
790 igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
791}
792
793static void test_pi_ringfull(int fd, unsigned int engine)
794{
795 const uint32_t bbe = MI_BATCH_BUFFER_END;
796 struct sigaction sa = { .sa_handler = alarm_handler };
797 struct drm_i915_gem_execbuffer2 execbuf;
798 struct drm_i915_gem_exec_object2 obj[2];
799 unsigned int last, count;
800 struct itimerval itv;
801 struct cork c;
802 bool *result;
803
804 result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
805 igt_assert(result != MAP_FAILED);
806
807 memset(&execbuf, 0, sizeof(execbuf));
808 memset(&obj, 0, sizeof(obj));
809
810 obj[1].handle = gem_create(fd, 4096);
811 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
812
813 execbuf.buffers_ptr = to_user_pointer(&obj[1]);
814 execbuf.buffer_count = 1;
815 execbuf.flags = engine;
816 execbuf.rsvd1 = gem_context_create(fd);
Chris Wilsonda553ff2017-09-22 10:32:44 +0100817 ctx_set_priority(fd, execbuf.rsvd1, MIN_PRIO);
Chris Wilson976ed7c2017-08-24 12:16:17 +0100818
819 gem_execbuf(fd, &execbuf);
820 gem_sync(fd, obj[1].handle);
821
822 /* Fill the low-priority ring */
823 plug(fd, &c);
824 obj[0].handle = c.handle;
825
826 execbuf.buffers_ptr = to_user_pointer(obj);
827 execbuf.buffer_count = 2;
828
829 sigaction(SIGALRM, &sa, NULL);
830 itv.it_interval.tv_sec = 0;
831 itv.it_interval.tv_usec = 100;
832 itv.it_value.tv_sec = 0;
833 itv.it_value.tv_usec = 1000;
834 setitimer(ITIMER_REAL, &itv, NULL);
835
836 last = -1;
837 count = 0;
838 do {
839 if (__execbuf(fd, &execbuf) == 0) {
840 count++;
841 continue;
842 }
843
844 if (last == count)
845 break;
846
847 last = count;
848 } while (1);
849 igt_debug("Filled low-priority ring with %d batches\n", count);
850
851 memset(&itv, 0, sizeof(itv));
852 setitimer(ITIMER_REAL, &itv, NULL);
853
854 execbuf.buffers_ptr = to_user_pointer(&obj[1]);
855 execbuf.buffer_count = 1;
856
857 /* both parent + child on the same cpu, only parent is RT */
858 bind_to_cpu(0);
859
860 igt_fork(child, 1) {
861 result[0] = true;
862
863 igt_debug("Creating HP context\n");
864 execbuf.rsvd1 = gem_context_create(fd);
865 ctx_set_priority(fd, execbuf.rsvd1, MAX_PRIO);
866
867 kill(getppid(), SIGALRM);
868 sched_yield();
869 result[1] = true;
870
871 itv.it_value.tv_sec = 0;
872 itv.it_value.tv_usec = 10000;
873 setitimer(ITIMER_REAL, &itv, NULL);
874
875 /* Since we are the high priority task, we expect to be
876 * able to add ourselves to *our* ring without interruption.
877 */
878 igt_debug("HP child executing\n");
879 result[2] = __execbuf(fd, &execbuf) == 0;
880 gem_context_destroy(fd, execbuf.rsvd1);
881 }
882
883 /* Relinquish CPU just to allow child to create a context */
884 sleep(1);
885 igt_assert_f(result[0], "HP context (child) not created");
886 igt_assert_f(!result[1], "Child released too early!\n");
887
888 /* Parent sleeps waiting for ringspace, releasing child */
889 itv.it_value.tv_sec = 0;
890 itv.it_value.tv_usec = 50000;
891 setitimer(ITIMER_REAL, &itv, NULL);
892 igt_debug("LP parent executing\n");
893 igt_assert_eq(__execbuf(fd, &execbuf), -EINTR);
894 igt_assert_f(result[1], "Child was not released!\n");
895 igt_assert_f(result[2],
896 "High priority child unable to submit within 10ms\n");
897
898 unplug(&c);
899 igt_waitchildren();
900
901 gem_context_destroy(fd, execbuf.rsvd1);
902 gem_close(fd, obj[1].handle);
903 gem_close(fd, obj[0].handle);
904 munmap(result, 4096);
905}
906
Chris Wilson765f7b02017-09-25 21:45:21 +0100907static unsigned int has_scheduler(int fd)
Chris Wilson721d8742016-10-27 11:32:47 +0100908{
909 drm_i915_getparam_t gp;
Chris Wilson765f7b02017-09-25 21:45:21 +0100910 unsigned int caps = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100911
912 gp.param = LOCAL_PARAM_HAS_SCHEDULER;
Chris Wilson765f7b02017-09-25 21:45:21 +0100913 gp.value = (int *)&caps;
Chris Wilson721d8742016-10-27 11:32:47 +0100914 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
915
Chris Wilson765f7b02017-09-25 21:45:21 +0100916 if (!caps)
917 return 0;
918
919 igt_info("Has kernel scheduler\n");
920 if (caps & HAS_PRIORITY)
921 igt_info(" - With priority sorting\n");
922 if (caps & HAS_PREEMPTION)
923 igt_info(" - With preemption enabled\n");
924
925 return caps;
Chris Wilson721d8742016-10-27 11:32:47 +0100926}
927
Chris Wilson976ed7c2017-08-24 12:16:17 +0100928#define HAVE_EXECLISTS 0x1
929#define HAVE_GUC 0x2
930static unsigned print_welcome(int fd)
931{
932 unsigned flags = 0;
933 bool active;
934 int dir;
935
936 dir = igt_sysfs_open_parameters(fd);
937 if (dir < 0)
938 return 0;
939
940 active = igt_sysfs_get_boolean(dir, "enable_guc_submission");
941 if (active) {
942 igt_info("Using GuC submission\n");
943 flags |= HAVE_GUC | HAVE_EXECLISTS;
944 goto out;
945 }
946
947 active = igt_sysfs_get_boolean(dir, "enable_execlists");
948 if (active) {
949 igt_info("Using Execlists submission\n");
950 flags |= HAVE_EXECLISTS;
951 goto out;
952 }
953
954 active = igt_sysfs_get_boolean(dir, "semaphores");
955 igt_info("Using Legacy submission%s\n",
956 active ? ", with semaphores" : "");
957
958out:
959 close(dir);
960 return flags;
961}
962
Chris Wilson721d8742016-10-27 11:32:47 +0100963igt_main
964{
965 const struct intel_execution_engine *e;
Chris Wilson765f7b02017-09-25 21:45:21 +0100966 unsigned int exec_caps = 0;
967 unsigned int sched_caps = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100968 int fd = -1;
969
970 igt_skip_on_simulation();
971
972 igt_fixture {
973 fd = drm_open_driver_master(DRIVER_INTEL);
Chris Wilson765f7b02017-09-25 21:45:21 +0100974 exec_caps = print_welcome(fd);
975 sched_caps = has_scheduler(fd);
Chris Wilson9518cb52017-02-22 15:24:54 +0000976 igt_require_gem(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100977 gem_require_mmap_wc(fd);
978 igt_fork_hang_detector(fd);
979 }
980
981 igt_subtest_group {
982 for (e = intel_execution_engines; e->name; e++) {
983 /* default exec-id is purely symbolic */
984 if (e->exec_id == 0)
985 continue;
986
987 igt_subtest_f("fifo-%s", e->name) {
988 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100989 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100990 fifo(fd, e->exec_id | e->flags);
991 }
992 }
993 }
994
995 igt_subtest_group {
996 igt_fixture {
Chris Wilson765f7b02017-09-25 21:45:21 +0100997 igt_require(sched_caps & HAS_SCHEDULER);
Chris Wilsonaf0e1c52017-02-21 18:25:58 +0000998 ctx_has_priority(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100999 }
1000
1001 for (e = intel_execution_engines; e->name; e++) {
1002 /* default exec-id is purely symbolic */
1003 if (e->exec_id == 0)
1004 continue;
1005
1006 igt_subtest_group {
Chris Wilson073cfd72017-03-17 11:52:51 +00001007 igt_fixture {
Chris Wilson721d8742016-10-27 11:32:47 +01001008 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +01001009 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson073cfd72017-03-17 11:52:51 +00001010 }
Chris Wilson721d8742016-10-27 11:32:47 +01001011
1012 igt_subtest_f("in-order-%s", e->name)
1013 reorder(fd, e->exec_id | e->flags, EQUAL);
1014
1015 igt_subtest_f("out-order-%s", e->name)
1016 reorder(fd, e->exec_id | e->flags, 0);
1017
1018 igt_subtest_f("promotion-%s", e->name)
1019 promotion(fd, e->exec_id | e->flags);
1020
Chris Wilson765f7b02017-09-25 21:45:21 +01001021 igt_subtest_group {
1022 igt_fixture {
1023 igt_require(sched_caps & HAS_PREEMPTION);
1024 }
Chris Wilsona3801342017-07-16 16:28:41 +01001025
Chris Wilson765f7b02017-09-25 21:45:21 +01001026 igt_subtest_f("preempt-%s", e->name)
1027 preempt(fd, e->exec_id | e->flags, 0);
Chris Wilsona3801342017-07-16 16:28:41 +01001028
Chris Wilson765f7b02017-09-25 21:45:21 +01001029 igt_subtest_f("preempt-contexts-%s", e->name)
1030 preempt(fd, e->exec_id | e->flags, NEW_CTX);
Chris Wilsona3801342017-07-16 16:28:41 +01001031
Chris Wilson765f7b02017-09-25 21:45:21 +01001032 igt_subtest_f("preempt-other-%s", e->name)
1033 preempt_other(fd, e->exec_id | e->flags);
1034
1035 igt_subtest_f("preempt-self-%s", e->name)
1036 preempt_self(fd, e->exec_id | e->flags);
1037 }
Chris Wilsona3801342017-07-16 16:28:41 +01001038
Chris Wilson721d8742016-10-27 11:32:47 +01001039 igt_subtest_f("deep-%s", e->name)
1040 deep(fd, e->exec_id | e->flags);
Chris Wilsonf6920752017-04-24 13:20:04 +01001041
1042 igt_subtest_f("wide-%s", e->name)
1043 wide(fd, e->exec_id | e->flags);
Chris Wilson61f8de72017-07-20 10:08:28 +01001044
1045 igt_subtest_f("reorder-wide-%s", e->name)
1046 reorder_wide(fd, e->exec_id | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +01001047 }
1048 }
1049 }
1050
Chris Wilson976ed7c2017-08-24 12:16:17 +01001051 igt_subtest_group {
1052 igt_fixture {
Chris Wilson765f7b02017-09-25 21:45:21 +01001053 igt_require(sched_caps & HAS_SCHEDULER);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001054 ctx_has_priority(fd);
1055
1056 /* need separate rings */
Chris Wilson765f7b02017-09-25 21:45:21 +01001057 igt_require(exec_caps & HAVE_EXECLISTS);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001058 }
1059
1060 for (e = intel_execution_engines; e->name; e++) {
1061 igt_subtest_group {
1062 igt_fixture {
1063 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilson765f7b02017-09-25 21:45:21 +01001064 igt_require(sched_caps & HAS_PREEMPTION);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001065 }
1066
1067 igt_subtest_f("pi-ringfull-%s", e->name)
1068 test_pi_ringfull(fd, e->exec_id | e->flags);
1069 }
1070 }
1071 }
1072
Chris Wilson721d8742016-10-27 11:32:47 +01001073 igt_fixture {
1074 igt_stop_hang_detector();
1075 close(fd);
1076 }
1077}