blob: 25704399bee5b6442c01bd09a965e6a6b12e360e [file] [log] [blame]
Chris Wilson721d8742016-10-27 11:32:47 +01001/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
Chris Wilson976ed7c2017-08-24 12:16:17 +010024#include "config.h"
25
Chris Wilson721d8742016-10-27 11:32:47 +010026#include <sys/poll.h>
Chris Wilsona19ef052017-07-31 14:15:59 +010027#include <sys/ioctl.h>
Chris Wilson976ed7c2017-08-24 12:16:17 +010028#include <sched.h>
Petri Latvala0c7e5062017-09-29 13:51:58 +030029#include <signal.h>
Chris Wilson721d8742016-10-27 11:32:47 +010030
31#include "igt.h"
32#include "igt_vgem.h"
Chris Wilson61f8de72017-07-20 10:08:28 +010033#include "igt_rand.h"
Chris Wilson976ed7c2017-08-24 12:16:17 +010034#include "igt_sysfs.h"
Chris Wilson721d8742016-10-27 11:32:47 +010035
#define BIT(x) (1ul << (x))

/* I915_PARAM_HAS_SCHEDULER and its capability bits (local copies until the
 * uapi headers catch up). The getparam value is a bitmask of the flags below.
 */
#define LOCAL_PARAM_HAS_SCHEDULER 41
#define HAS_SCHEDULER	BIT(0)	/* kernel has a request scheduler at all */
#define HAS_PRIORITY	BIT(1)	/* scheduler sorts by context priority */
#define HAS_PREEMPTION	BIT(2)	/* scheduler may preempt running requests */
#define LOCAL_CONTEXT_PARAM_PRIORITY 6	/* context setparam: priority */

/* Indices into the ctx[] arrays used throughout the tests. */
#define LO 0
#define HI 1
#define NOISE 2

/* User-visible priority range offered by the kernel scheduler. */
#define MAX_PRIO 1023
#define MIN_PRIO -1023

/* Number of max-priority spinners used to keep an engine busy (plugged). */
#define BUSY_QLEN 8

IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
54
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000055static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
Chris Wilson721d8742016-10-27 11:32:47 +010056{
57 struct local_i915_gem_context_param param;
58
59 memset(&param, 0, sizeof(param));
60 param.context = ctx;
61 param.size = 0;
62 param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
63 param.value = prio;
64
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000065 return __gem_context_set_param(fd, &param);
66}
67
/* Set a context's priority, asserting that the kernel accepted it. */
static void ctx_set_priority(int fd, uint32_t ctx, int prio)
{
	int err = __ctx_set_priority(fd, ctx, prio);

	igt_assert_eq(err, 0);
}
72
73static void ctx_has_priority(int fd)
74{
75 igt_require(__ctx_set_priority(fd, 0, MAX_PRIO) == 0);
Chris Wilson721d8742016-10-27 11:32:47 +010076}
77
/*
 * Submit a single MI_STORE_DWORD_IMM batch on @ring in context @ctx that
 * writes @value into @target at byte @offset.
 *
 * If @cork is non-zero, that handle is added to the execbuf so the request
 * inherits the cork's (vgem) fence and stays queued until the cork is
 * unplugged. @write_domain controls whether the store is declared as a write
 * (creating an ordering dependency on @target) or hidden from the kernel.
 */
static void store_dword(int fd, uint32_t ctx, unsigned ring,
			uint32_t target, uint32_t offset, uint32_t value,
			uint32_t cork, unsigned write_domain)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[16];
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	/* Skip obj[0] (the cork slot) entirely when no cork was supplied. */
	execbuf.buffers_ptr = to_user_pointer(obj + !cork);
	execbuf.buffer_count = 2 + !!cork;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE; /* needed for MI_STORE_DWORD on old gens */
	execbuf.rsvd1 = ctx;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork;
	obj[1].handle = target;
	obj[2].handle = gem_create(fd, 4096); /* throwaway batch buffer */

	/* One relocation: patch the store address inside the batch. */
	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[1].handle;
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = write_domain;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	/* Emit MI_STORE_DWORD_IMM; the address encoding varies per gen. */
	i = 0;
	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[++i] = offset;	/* 64-bit address: lo, hi */
		batch[++i] = 0;
	} else if (gen >= 4) {
		batch[++i] = 0;		/* MBZ dword before the address */
		batch[++i] = offset;
		reloc.offset += sizeof(uint32_t);
	} else {
		batch[i]--;		/* gen2/3: shorter command length */
		batch[++i] = offset;
	}
	batch[++i] = value;
	batch[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[2].handle); /* kernel keeps its own reference while queued */
}
131
Chris Wilson721d8742016-10-27 11:32:47 +0100132struct cork {
133 int device;
134 uint32_t handle;
135 uint32_t fence;
136};
137
/*
 * Create a cork: allocate a 1x1 vgem bo, attach an unsignaled write fence,
 * and import it into @fd via dma-buf. Requests referencing c->handle will
 * wait on that fence until unplug() is called.
 */
static void plug(int fd, struct cork *c)
{
	struct vgem_bo bo;
	int dmabuf;

	c->device = drm_open_driver(DRIVER_VGEM);

	bo.width = bo.height = 1;
	bo.bpp = 4;
	vgem_create(c->device, &bo);
	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);

	/* Export from vgem and import into the i915 fd under test. */
	dmabuf = prime_handle_to_fd(c->device, bo.handle);
	c->handle = prime_fd_to_handle(fd, dmabuf);
	close(dmabuf);
}
154
/* Signal the cork's fence, releasing all queued requests, and drop vgem. */
static void unplug(struct cork *c)
{
	vgem_fence_signal(c->device, c->fence);
	close(c->device); /* also releases the vgem bo */
}
160
Chris Wilson2885b102017-09-25 20:59:54 +0100161static uint32_t create_highest_priority(int fd)
162{
163 uint32_t ctx = gem_context_create(fd);
164
165 /*
166 * If there is no priority support, all contexts will have equal
167 * priority (and therefore the max user priority), so no context
168 * can overtake us, and we effectively can form a plug.
169 */
170 __ctx_set_priority(fd, ctx, MAX_PRIO);
171
172 return ctx;
173}
174
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100175static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
176{
Chris Wilson2885b102017-09-25 20:59:54 +0100177 igt_spin_t *spin[BUSY_QLEN];
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100178
Chris Wilson2885b102017-09-25 20:59:54 +0100179 for (int n = 0; n < ARRAY_SIZE(spin); n++) {
180 uint32_t ctx = create_highest_priority(fd);
181 spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
182 gem_context_destroy(fd, ctx);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100183 }
184
185 unplug(c); /* batches will now be queued on the engine */
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100186 igt_debugfs_dump(fd, "i915_engine_info");
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100187
Chris Wilson2885b102017-09-25 20:59:54 +0100188 for (int n = 0; n < ARRAY_SIZE(spin); n++)
189 igt_spin_batch_free(fd, spin[n]);
190
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100191}
192
/*
 * Subtest: two equal-priority stores from the same (default) context to the
 * same dword must execute in submission order, so the second value wins.
 */
static void fifo(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;

	scratch = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Same priority, same timeline, final result will be the second eb */
	store_dword(fd, 0, ring, scratch, 0, 1, cork.handle, 0);
	store_dword(fd, 0, ring, scratch, 0, 2, cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	igt_assert_eq_u32(ptr[0], 2);
	munmap(ptr, 4096);
}
217
/*
 * Subtest: queue a low-priority store then a high-priority store to the same
 * dword. With priority sorting the HI request runs first, so the LO value is
 * written last and wins. With EQUAL priorities FIFO order applies instead.
 */
static void reorder(int fd, unsigned ring, unsigned flags)
#define EQUAL 1
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);

	scratch = gem_create(fd, 4096);
	plug(fd, &cork);

	/* We expect the high priority context to be executed first, and
	 * so the final result will be value from the low priority context.
	 */
	store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], cork.handle, 0);
	store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	if (flags & EQUAL) /* equal priority, result will be fifo */
		igt_assert_eq_u32(ptr[0], ctx[HI]);
	else
		igt_assert_eq_u32(ptr[0], ctx[LO]);
	munmap(ptr, 4096);
}
257
/*
 * Subtest: priority inheritance. A HI request depending (via @dep) on a LO
 * request must promote the LO request above the intermediate NOISE context,
 * giving the execution order LO, HI, NOISE.
 */
static void promotion(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t result, dep;
	uint32_t *ptr;
	uint32_t ctx[3];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], 0);

	/* NOISE sits between LO and HI in priority. */
	ctx[NOISE] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[NOISE], MIN_PRIO/2);

	result = gem_create(fd, 4096);
	dep = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
	 *
	 * fifo would be NOISE, LO, HI.
	 * strict priority would be HI, NOISE, LO
	 */
	store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], cork.handle, 0);
	store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], cork.handle, 0);

	/* link LO <-> HI via a dependency on another buffer */
	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);

	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	/* dep must end with the HI value: HI wrote after (and because of) LO. */
	ptr = gem_mmap__gtt(fd, dep, 4096, PROT_READ);
	gem_set_domain(fd, dep, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, dep);

	igt_assert_eq_u32(ptr[0], ctx[HI]);
	munmap(ptr, 4096);

	/* result must end with NOISE: it ran last, after the promoted pair. */
	ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	igt_assert_eq_u32(ptr[0], ctx[NOISE]);
	munmap(ptr, 4096);
}
315
#define NEW_CTX 0x1
/*
 * Subtest: repeatedly start a MIN_PRIO spinner and then submit a MAX_PRIO
 * store; the store must preempt the spinner and complete while the very
 * first spinner is still busy. With NEW_CTX, each iteration uses a fresh
 * low-priority context.
 */
static void preempt(int fd, unsigned ring, unsigned flags)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	for (int n = 0; n < 16; n++) {
		if (flags & NEW_CTX) {
			gem_context_destroy(fd, ctx[LO]);
			ctx[LO] = gem_context_create(fd);
			ctx_set_priority(fd, ctx[LO], MIN_PRIO);
		}
		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);

		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);

		/* Wait for the store; it must have overtaken every spinner. */
		gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
		igt_assert_eq_u32(ptr[0], n + 1);
		/* The oldest spinner must still be running, i.e. preempted not completed. */
		igt_assert(gem_bo_busy(fd, spin[0]->handle));
	}

	for (int n = 0; n < 16; n++)
		igt_spin_batch_free(fd, spin[n]);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
355
/*
 * Subtest: preemption across engines. Spinners + LOW stores are queued on
 * every engine; a single HIGH store on @ring must pull all the LOW stores
 * in ahead of the still-running spinners.
 */
static void preempt_other(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [LOW] write
	 *
	 * Then on our target engine do a [HIGH] write which should then
	 * prompt its dependent LOW writes in front of the spinner on
	 * each engine. The purpose of this test is to check that preemption
	 * can cross engines.
	 */

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	n = 0;
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		/* Each engine writes n+1 into its own slot of result. */
		store_dword(fd, ctx[LO], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	/* Wait for all writes to result (spinners are still spinning). */
	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* Slot 0 was never written (stays 0); slots 1..n hold their index. */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
413
/*
 * Subtest: a context must be able to preempt its own earlier, lower-priority
 * work. Queue low-priority writes from ctx[HI] behind spinners on every
 * engine, then raise ctx[HI] to MAX_PRIO and submit one more write.
 */
static void preempt_self(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [self/LOW] write
	 *
	 * Then on our target engine do a [self/HIGH] write which should then
	 * preempt its own lower priority task on any engine.
	 */

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);

	n = 0;
	ctx_set_priority(fd, ctx[HI], MIN_PRIO); /* queue the early work as LOW */
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[HI], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	ctx_set_priority(fd, ctx[HI], MAX_PRIO); /* now promote the final write */
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* Slot 0 was never written (stays 0); slots 1..n hold their index. */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
466
/*
 * Subtest: build a deep dependency chain across one context per priority
 * level, all writing through shared dep[] buffers into a common result.
 * The shared dependencies force priority inheritance to flatten the
 * ordering back to submission (FIFO) order, so the last submitting
 * context's value must win everywhere.
 */
static void deep(int fd, unsigned ring)
{
#define XS 8
	const unsigned int nctx = MAX_PRIO - MIN_PRIO; /* one ctx per priority step */
	const unsigned size = ALIGN(4*nctx, 4096);
	struct timespec tv = {};
	struct cork cork;
	uint32_t result, dep[XS];
	uint32_t expected = 0;
	uint32_t *ptr;
	uint32_t *ctx;

	ctx = malloc(sizeof(*ctx) * nctx);
	for (int n = 0; n < nctx; n++) {
		ctx[n] = gem_context_create(fd);
		ctx_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
	}

	result = gem_create(fd, size);
	for (int m = 0; m < XS; m ++)
		dep[m] = gem_create(fd, size);

	/* Bind all surfaces and contexts before starting the timeout. */
	{
		struct drm_i915_gem_exec_object2 obj[XS + 2];
		struct drm_i915_gem_execbuffer2 execbuf;
		const uint32_t bbe = MI_BATCH_BUFFER_END;

		memset(obj, 0, sizeof(obj));
		for (int n = 0; n < XS; n++)
			obj[n].handle = dep[n];
		obj[XS].handle = result;
		obj[XS+1].handle = gem_create(fd, 4096);
		gem_write(fd, obj[XS+1].handle, 0, &bbe, sizeof(bbe));

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = to_user_pointer(obj);
		execbuf.buffer_count = XS + 2;
		execbuf.flags = ring;
		for (int n = 0; n < nctx; n++) {
			execbuf.rsvd1 = ctx[n];
			gem_execbuf(fd, &execbuf);
		}
		gem_close(fd, obj[XS+1].handle);
		gem_sync(fd, result);
	}

	plug(fd, &cork);

	/* Create a deep dependency chain, with a few branches */
	for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
		for (int m = 0; m < XS; m++)
			store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);

	/* Each context then writes to every slot of result through dep[m],
	 * so the last context to submit is the last to execute (FIFO via PI).
	 */
	for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
		for (int m = 0; m < XS; m++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
			store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
		}
		expected = ctx[n];
	}

	unplug_show_queue(fd, &cork, ring);
	igt_require(expected); /* too slow */

	for (int n = 0; n < nctx; n++)
		gem_context_destroy(fd, ctx[n]);

	for (int m = 0; m < XS; m++) {
		ptr = gem_mmap__gtt(fd, dep[m], size, PROT_READ);
		gem_set_domain(fd, dep[m], /* no write hazard lies! */
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		gem_close(fd, dep[m]);

		/* dep writes were declared as writes, so they stay in order. */
		for (int n = 0; n < nctx; n++)
			igt_assert_eq_u32(ptr[n], ctx[n]);
		munmap(ptr, size);
	}

	ptr = gem_mmap__gtt(fd, result, size, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	/* No reordering due to PI on all contexts because of the common dep */
	for (int m = 0; m < XS; m++)
		igt_assert_eq_u32(ptr[m], expected);
	munmap(ptr, size);

	free(ctx);
#undef XS
}
559
/* Empty SIGALRM handler: its only job is to interrupt a blocking ioctl
 * so that the call returns with -EINTR instead of waiting forever.
 */
static void alarm_handler(int sig)
{
}
563
564static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
565{
Chris Wilson976ed7c2017-08-24 12:16:17 +0100566 int err = 0;
567 if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
568 err = -errno;
569 return err;
Chris Wilsona19ef052017-07-31 14:15:59 +0100570}
571
/*
 * Estimate how many requests fit in @ring's ringbuffer: plug the engine
 * with a cork, then submit no-op batches under a repeating interval timer
 * until execbuf blocks and is interrupted without further progress.
 * Returns the number of requests queued before the ring filled.
 */
static unsigned int measure_ring_size(int fd, unsigned int ring)
{
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	unsigned int count, last;
	struct itimerval itv;
	struct cork c;

	memset(obj, 0, sizeof(obj));
	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	/* Warm up: bind the batch before measuring. */
	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj + 1);
	execbuf.buffer_count = 1;
	execbuf.flags = ring;
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	/* Use a private context so we fill a fresh ring, not the default's. */
	execbuf.rsvd1 = gem_context_create(fd);

	/* Arm a 100us repeating alarm to break us out of a full-ring wait. */
	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		/* Interrupted twice with no new submission: ring is full. */
		if (last == count)
			break;

		last = count;
	} while (1);

	/* Disarm the timer. */
	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	unplug(&c);
	gem_close(fd, obj[1].handle);
	gem_context_destroy(fd, execbuf.rsvd1);

	return count;
}
630
/*
 * Subtest: submit plugged stores from NCTX contexts simultaneously (up to
 * one ring's worth, capped at 5s of submission), then unplug and verify
 * that every context's in-order write landed in its slot.
 */
static void wide(int fd, unsigned ring)
{
#define NCTX 4096
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);

	struct cork cork;
	uint32_t result;
	uint32_t *ptr;
	uint32_t *ctx;
	unsigned int count;

	ctx = malloc(sizeof(*ctx)*NCTX);
	for (int n = 0; n < NCTX; n++)
		ctx[n] = gem_context_create(fd);

	result = gem_create(fd, 4*NCTX); /* one dword slot per context */

	plug(fd, &cork);

	/* Lots of in-order requests, plugged and submitted simultaneously */
	for (count = 0;
	     igt_seconds_elapsed(&tv) < 5 && count < ring_size;
	     count++) {
		for (int n = 0; n < NCTX; n++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}
	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
		 count, NCTX, igt_nsec_elapsed(&tv) * 1e-6);

	unplug_show_queue(fd, &cork, ring);

	for (int n = 0; n < NCTX; n++)
		gem_context_destroy(fd, ctx[n]);

	ptr = gem_mmap__gtt(fd, result, 4*NCTX, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < NCTX; n++)
		igt_assert_eq_u32(ptr[n], ctx[n]);
	munmap(ptr, 4*NCTX);

	gem_close(fd, result);
	free(ctx);
#undef NCTX
}
678
/*
 * Subtest: while plugged, submit a ring's worth of stores to random slots
 * from one context per priority level, lowest priority first. After the
 * unplug, higher priorities execute first and lower priorities overwrite
 * them, so each slot must end with the value of the lowest-priority
 * (first-submitting) context that touched it — recorded in expected[].
 */
static void reorder_wide(int fd, unsigned ring)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);
	struct cork cork;
	uint32_t result, target;
	uint32_t *found, *expected;

	result = gem_create(fd, 4096);
	target = gem_create(fd, 4096); /* CPU-side scratch for expected[] */

	plug(fd, &cork);

	expected = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork.handle;
	obj[1].handle = result;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = result;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = 0; /* lies */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 3;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	/* One context per priority, from MIN_PRIO upwards; x tags the ctx. */
	for (int n = MIN_PRIO, x = 1;
	     igt_seconds_elapsed(&tv) < 5 && n <= MAX_PRIO;
	     n++, x++) {
		unsigned int sz = ALIGN(ring_size * 64, 4096);
		uint32_t *batch;

		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, n);

		/* One big bo holding ring_size 64-byte mini-batches. */
		obj[2].handle = gem_create(fd, sz);
		batch = gem_mmap__gtt(fd, obj[2].handle, sz, PROT_WRITE);
		gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

		for (int m = 0; m < ring_size; m++) {
			uint64_t addr;
			int idx = hars_petruska_f54_1_random_unsafe_max(1024);
			int i;

			execbuf.batch_start_offset = m * 64;
			reloc.offset = execbuf.batch_start_offset + sizeof(uint32_t);
			reloc.delta = idx * sizeof(uint32_t);
			addr = reloc.presumed_offset + reloc.delta;

			/* Hand-rolled MI_STORE_DWORD_IMM, as in store_dword(). */
			i = execbuf.batch_start_offset / sizeof(uint32_t);
			batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
			if (gen >= 8) {
				batch[++i] = addr;
				batch[++i] = addr >> 32;
			} else if (gen >= 4) {
				batch[++i] = 0;
				batch[++i] = addr;
				reloc.offset += sizeof(uint32_t);
			} else {
				batch[i]--;
				batch[++i] = addr;
			}
			batch[++i] = x;
			batch[++i] = MI_BATCH_BUFFER_END;

			/* First (lowest-priority) writer of a slot wins. */
			if (!expected[idx])
				expected[idx] = x;

			gem_execbuf(fd, &execbuf);
		}

		munmap(batch, sz);
		gem_close(fd, obj[2].handle);
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	unplug_show_queue(fd, &cork, ring);

	found = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < 1024; n++)
		igt_assert_eq_u32(found[n], expected[n]);
	munmap(found, 4096);
	munmap(expected, 4096);

	gem_close(fd, result);
	gem_close(fd, target);
}
780
Chris Wilson976ed7c2017-08-24 12:16:17 +0100781static void bind_to_cpu(int cpu)
782{
783 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
784 struct sched_param rt = {.sched_priority = 99 };
785 cpu_set_t allowed;
786
787 igt_assert(sched_setscheduler(getpid(), SCHED_RR | SCHED_RESET_ON_FORK, &rt) == 0);
788
789 CPU_ZERO(&allowed);
790 CPU_SET(cpu % ncpus, &allowed);
791 igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
792}
793
/*
 * Subtest: priority inversion on a full ring. The RT parent fills a
 * MIN_PRIO context's ring until execbuf would block, then sleeps inside
 * execbuf. A non-RT child with a MAX_PRIO context must still be able to
 * submit within 10ms, i.e. a blocked low-priority client must not stall a
 * high-priority one. Progress is reported through a shared result[] mmap:
 * [0] child started, [1] child released by parent's wait, [2] HP execbuf ok.
 */
static void test_pi_ringfull(int fd, unsigned int engine)
{
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	unsigned int last, count;
	struct itimerval itv;
	struct cork c;
	bool *result;

	/* MAP_SHARED so the forked child can report back to the parent. */
	result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	igt_assert(result != MAP_FAILED);

	memset(&execbuf, 0, sizeof(execbuf));
	memset(&obj, 0, sizeof(obj));

	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;
	execbuf.flags = engine;
	execbuf.rsvd1 = gem_context_create(fd);
	ctx_set_priority(fd, execbuf.rsvd1, MIN_PRIO);

	/* Warm up / bind before filling the ring. */
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	/* Fill the low-priority ring */
	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;

	/* Same alarm-driven fill loop as measure_ring_size(). */
	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		if (last == count)
			break;

		last = count;
	} while (1);
	igt_debug("Filled low-priority ring with %d batches\n", count);

	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;

	/* both parent + child on the same cpu, only parent is RT */
	bind_to_cpu(0);

	igt_fork(child, 1) {
		result[0] = true;

		igt_debug("Creating HP context\n");
		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, MAX_PRIO);

		/* Wake the parent out of its sleep(1), then yield to it. */
		kill(getppid(), SIGALRM);
		sched_yield();
		result[1] = true;

		/* 10ms budget for the high-priority submission below. */
		itv.it_value.tv_sec = 0;
		itv.it_value.tv_usec = 10000;
		setitimer(ITIMER_REAL, &itv, NULL);

		/* Since we are the high priority task, we expect to be
		 * able to add ourselves to *our* ring without interruption.
		 */
		igt_debug("HP child executing\n");
		result[2] = __execbuf(fd, &execbuf) == 0;
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	/* Relinquish CPU just to allow child to create a context */
	sleep(1);
	igt_assert_f(result[0], "HP context (child) not created");
	igt_assert_f(!result[1], "Child released too early!\n");

	/* Parent sleeps waiting for ringspace, releasing child */
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 50000;
	setitimer(ITIMER_REAL, &itv, NULL);
	igt_debug("LP parent executing\n");
	igt_assert_eq(__execbuf(fd, &execbuf), -EINTR);
	igt_assert_f(result[1], "Child was not released!\n");
	igt_assert_f(result[2],
		     "High priority child unable to submit within 10ms\n");

	unplug(&c);
	igt_waitchildren();

	gem_context_destroy(fd, execbuf.rsvd1);
	gem_close(fd, obj[1].handle);
	gem_close(fd, obj[0].handle);
	munmap(result, 4096);
}
907
Chris Wilson765f7b02017-09-25 21:45:21 +0100908static unsigned int has_scheduler(int fd)
Chris Wilson721d8742016-10-27 11:32:47 +0100909{
910 drm_i915_getparam_t gp;
Chris Wilson765f7b02017-09-25 21:45:21 +0100911 unsigned int caps = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100912
913 gp.param = LOCAL_PARAM_HAS_SCHEDULER;
Chris Wilson765f7b02017-09-25 21:45:21 +0100914 gp.value = (int *)&caps;
Chris Wilson721d8742016-10-27 11:32:47 +0100915 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
916
Chris Wilson765f7b02017-09-25 21:45:21 +0100917 if (!caps)
918 return 0;
919
920 igt_info("Has kernel scheduler\n");
921 if (caps & HAS_PRIORITY)
922 igt_info(" - With priority sorting\n");
923 if (caps & HAS_PREEMPTION)
924 igt_info(" - With preemption enabled\n");
925
926 return caps;
Chris Wilson721d8742016-10-27 11:32:47 +0100927}
928
Chris Wilson976ed7c2017-08-24 12:16:17 +0100929#define HAVE_EXECLISTS 0x1
930#define HAVE_GUC 0x2
931static unsigned print_welcome(int fd)
932{
933 unsigned flags = 0;
934 bool active;
935 int dir;
936
937 dir = igt_sysfs_open_parameters(fd);
938 if (dir < 0)
939 return 0;
940
941 active = igt_sysfs_get_boolean(dir, "enable_guc_submission");
942 if (active) {
943 igt_info("Using GuC submission\n");
944 flags |= HAVE_GUC | HAVE_EXECLISTS;
945 goto out;
946 }
947
948 active = igt_sysfs_get_boolean(dir, "enable_execlists");
949 if (active) {
950 igt_info("Using Execlists submission\n");
951 flags |= HAVE_EXECLISTS;
952 goto out;
953 }
954
955 active = igt_sysfs_get_boolean(dir, "semaphores");
956 igt_info("Using Legacy submission%s\n",
957 active ? ", with semaphores" : "");
958
959out:
960 close(dir);
961 return flags;
962}
963
Chris Wilson721d8742016-10-27 11:32:47 +0100964igt_main
965{
966 const struct intel_execution_engine *e;
Chris Wilson765f7b02017-09-25 21:45:21 +0100967 unsigned int exec_caps = 0;
968 unsigned int sched_caps = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100969 int fd = -1;
970
971 igt_skip_on_simulation();
972
973 igt_fixture {
974 fd = drm_open_driver_master(DRIVER_INTEL);
Chris Wilson765f7b02017-09-25 21:45:21 +0100975 exec_caps = print_welcome(fd);
976 sched_caps = has_scheduler(fd);
Chris Wilson9518cb52017-02-22 15:24:54 +0000977 igt_require_gem(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100978 gem_require_mmap_wc(fd);
979 igt_fork_hang_detector(fd);
980 }
981
982 igt_subtest_group {
983 for (e = intel_execution_engines; e->name; e++) {
984 /* default exec-id is purely symbolic */
985 if (e->exec_id == 0)
986 continue;
987
988 igt_subtest_f("fifo-%s", e->name) {
989 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100990 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100991 fifo(fd, e->exec_id | e->flags);
992 }
993 }
994 }
995
996 igt_subtest_group {
997 igt_fixture {
Chris Wilson765f7b02017-09-25 21:45:21 +0100998 igt_require(sched_caps & HAS_SCHEDULER);
Chris Wilsonaf0e1c52017-02-21 18:25:58 +0000999 ctx_has_priority(fd);
Chris Wilson721d8742016-10-27 11:32:47 +01001000 }
1001
1002 for (e = intel_execution_engines; e->name; e++) {
1003 /* default exec-id is purely symbolic */
1004 if (e->exec_id == 0)
1005 continue;
1006
1007 igt_subtest_group {
Chris Wilson073cfd72017-03-17 11:52:51 +00001008 igt_fixture {
Chris Wilson721d8742016-10-27 11:32:47 +01001009 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +01001010 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson073cfd72017-03-17 11:52:51 +00001011 }
Chris Wilson721d8742016-10-27 11:32:47 +01001012
1013 igt_subtest_f("in-order-%s", e->name)
1014 reorder(fd, e->exec_id | e->flags, EQUAL);
1015
1016 igt_subtest_f("out-order-%s", e->name)
1017 reorder(fd, e->exec_id | e->flags, 0);
1018
1019 igt_subtest_f("promotion-%s", e->name)
1020 promotion(fd, e->exec_id | e->flags);
1021
Chris Wilson765f7b02017-09-25 21:45:21 +01001022 igt_subtest_group {
1023 igt_fixture {
1024 igt_require(sched_caps & HAS_PREEMPTION);
1025 }
Chris Wilsona3801342017-07-16 16:28:41 +01001026
Chris Wilson765f7b02017-09-25 21:45:21 +01001027 igt_subtest_f("preempt-%s", e->name)
1028 preempt(fd, e->exec_id | e->flags, 0);
Chris Wilsona3801342017-07-16 16:28:41 +01001029
Chris Wilson765f7b02017-09-25 21:45:21 +01001030 igt_subtest_f("preempt-contexts-%s", e->name)
1031 preempt(fd, e->exec_id | e->flags, NEW_CTX);
Chris Wilsona3801342017-07-16 16:28:41 +01001032
Chris Wilson765f7b02017-09-25 21:45:21 +01001033 igt_subtest_f("preempt-other-%s", e->name)
1034 preempt_other(fd, e->exec_id | e->flags);
1035
1036 igt_subtest_f("preempt-self-%s", e->name)
1037 preempt_self(fd, e->exec_id | e->flags);
1038 }
Chris Wilsona3801342017-07-16 16:28:41 +01001039
Chris Wilson721d8742016-10-27 11:32:47 +01001040 igt_subtest_f("deep-%s", e->name)
1041 deep(fd, e->exec_id | e->flags);
Chris Wilsonf6920752017-04-24 13:20:04 +01001042
1043 igt_subtest_f("wide-%s", e->name)
1044 wide(fd, e->exec_id | e->flags);
Chris Wilson61f8de72017-07-20 10:08:28 +01001045
1046 igt_subtest_f("reorder-wide-%s", e->name)
1047 reorder_wide(fd, e->exec_id | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +01001048 }
1049 }
1050 }
1051
Chris Wilson976ed7c2017-08-24 12:16:17 +01001052 igt_subtest_group {
1053 igt_fixture {
Chris Wilson765f7b02017-09-25 21:45:21 +01001054 igt_require(sched_caps & HAS_SCHEDULER);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001055 ctx_has_priority(fd);
1056
1057 /* need separate rings */
Chris Wilson765f7b02017-09-25 21:45:21 +01001058 igt_require(exec_caps & HAVE_EXECLISTS);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001059 }
1060
1061 for (e = intel_execution_engines; e->name; e++) {
1062 igt_subtest_group {
1063 igt_fixture {
1064 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilson765f7b02017-09-25 21:45:21 +01001065 igt_require(sched_caps & HAS_PREEMPTION);
Chris Wilson976ed7c2017-08-24 12:16:17 +01001066 }
1067
1068 igt_subtest_f("pi-ringfull-%s", e->name)
1069 test_pi_ringfull(fd, e->exec_id | e->flags);
1070 }
1071 }
1072 }
1073
Chris Wilson721d8742016-10-27 11:32:47 +01001074 igt_fixture {
1075 igt_stop_hang_detector();
1076 close(fd);
1077 }
1078}