blob: 79738ee1a1819c132b4e1eb1c36701948603e93d [file] [log] [blame]
Chris Wilson721d8742016-10-27 11:32:47 +01001/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
Chris Wilson976ed7c2017-08-24 12:16:17 +010024#include "config.h"
25
Chris Wilson721d8742016-10-27 11:32:47 +010026#include <sys/poll.h>
Chris Wilsona19ef052017-07-31 14:15:59 +010027#include <sys/ioctl.h>
Chris Wilson976ed7c2017-08-24 12:16:17 +010028#include <sched.h>
Chris Wilson721d8742016-10-27 11:32:47 +010029
30#include "igt.h"
31#include "igt_vgem.h"
Chris Wilson61f8de72017-07-20 10:08:28 +010032#include "igt_rand.h"
Chris Wilson976ed7c2017-08-24 12:16:17 +010033#include "igt_sysfs.h"
Chris Wilson721d8742016-10-27 11:32:47 +010034
Chris Wilson49f44c72016-11-14 21:24:52 +000035#define LOCAL_PARAM_HAS_SCHEDULER 41
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000036#define LOCAL_CONTEXT_PARAM_PRIORITY 6
Chris Wilson721d8742016-10-27 11:32:47 +010037
38#define LO 0
39#define HI 1
40#define NOISE 2
41
42#define MAX_PRIO 1023
Chris Wilsonda553ff2017-09-22 10:32:44 +010043#define MIN_PRIO -1023
Chris Wilson721d8742016-10-27 11:32:47 +010044
45#define BUSY_QLEN 8
46
47IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
48
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000049static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
Chris Wilson721d8742016-10-27 11:32:47 +010050{
51 struct local_i915_gem_context_param param;
52
53 memset(&param, 0, sizeof(param));
54 param.context = ctx;
55 param.size = 0;
56 param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
57 param.value = prio;
58
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000059 return __gem_context_set_param(fd, &param);
60}
61
/* As __ctx_set_priority(), but any failure is fatal. */
static void ctx_set_priority(int fd, uint32_t ctx, int prio)
{
	int err = __ctx_set_priority(fd, ctx, prio);

	igt_assert_eq(err, 0);
}
66
67static void ctx_has_priority(int fd)
68{
69 igt_require(__ctx_set_priority(fd, 0, MAX_PRIO) == 0);
Chris Wilson721d8742016-10-27 11:32:47 +010070}
71
/*
 * Submit a batch on @ring, in context @ctx, that writes @value into
 * @target at byte @offset using MI_STORE_DWORD_IMM.
 *
 * If @cork is non-zero, the corked bo is placed first in the object
 * list so the batch inherits its unsignaled fence and cannot execute
 * until unplug().  A non-zero @write_domain marks @target as written,
 * giving later batches that reference it an ordering dependency.
 */
static void store_dword(int fd, uint32_t ctx, unsigned ring,
			uint32_t target, uint32_t offset, uint32_t value,
			uint32_t cork, unsigned write_domain)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[16];
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	/* Skip obj[0] (the cork slot) entirely when no cork was supplied. */
	execbuf.buffers_ptr = to_user_pointer(obj + !cork);
	execbuf.buffer_count = 2 + !!cork;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE; /* MI_STORE_DWORD needs a secure batch pre-gen6 */
	execbuf.rsvd1 = ctx;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork;
	obj[1].handle = target;
	obj[2].handle = gem_create(fd, 4096); /* the batch buffer itself */

	/* Patch the store address to wherever @target ends up in the GTT. */
	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[1].handle;
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = write_domain;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	/* MI_STORE_DWORD_IMM; the address encoding varies with gen. */
	i = 0;
	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[++i] = offset;	/* 64-bit address: low dword */
		batch[++i] = 0;		/* high dword */
	} else if (gen >= 4) {
		batch[++i] = 0;		/* MBZ */
		batch[++i] = offset;
		reloc.offset += sizeof(uint32_t); /* address moved one dword down */
	} else {
		batch[i]--;		/* gen2/3 use a shorter command length */
		batch[++i] = offset;
	}
	batch[++i] = value;
	batch[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[2].handle);
}
125
/*
 * A "cork": an external (vgem) bo carrying an unsignaled write-fence.
 * Batches that reference the imported handle are blocked until the
 * fence is signalled by unplug(), letting tests queue work in a known
 * order before any of it executes.
 */
struct cork {
	int device;		/* vgem fd owning the bo and fence */
	uint32_t handle;	/* handle of the bo as imported on the i915 fd */
	uint32_t fence;		/* unsignaled write fence on the vgem bo */
};
131
/*
 * Set up a cork: create a 1x1 vgem bo, attach an unsignaled write
 * fence to it and import it into @fd via dma-buf, so any batch that
 * lists c->handle stalls until unplug().
 */
static void plug(int fd, struct cork *c)
{
	struct vgem_bo bo;
	int dmabuf;

	c->device = drm_open_driver(DRIVER_VGEM);

	bo.width = bo.height = 1;
	bo.bpp = 4;
	vgem_create(c->device, &bo);
	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);

	/* Share the fenced bo with the i915 device under test. */
	dmabuf = prime_handle_to_fd(c->device, bo.handle);
	c->handle = prime_fd_to_handle(fd, dmabuf);
	close(dmabuf);
}
148
/*
 * Release the cork: signal the fence so all corked batches may run,
 * then close the vgem device (dropping its bo and fence).
 */
static void unplug(struct cork *c)
{
	vgem_fence_signal(c->device, c->fence);
	close(c->device);
}
154
Chris Wilson2885b102017-09-25 20:59:54 +0100155static uint32_t create_highest_priority(int fd)
156{
157 uint32_t ctx = gem_context_create(fd);
158
159 /*
160 * If there is no priority support, all contexts will have equal
161 * priority (and therefore the max user priority), so no context
162 * can overtake us, and we effectively can form a plug.
163 */
164 __ctx_set_priority(fd, ctx, MAX_PRIO);
165
166 return ctx;
167}
168
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100169static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
170{
Chris Wilson2885b102017-09-25 20:59:54 +0100171 igt_spin_t *spin[BUSY_QLEN];
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100172
Chris Wilson2885b102017-09-25 20:59:54 +0100173 for (int n = 0; n < ARRAY_SIZE(spin); n++) {
174 uint32_t ctx = create_highest_priority(fd);
175 spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
176 gem_context_destroy(fd, ctx);
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100177 }
178
179 unplug(c); /* batches will now be queued on the engine */
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100180 igt_debugfs_dump(fd, "i915_engine_info");
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100181
Chris Wilson2885b102017-09-25 20:59:54 +0100182 for (int n = 0; n < ARRAY_SIZE(spin); n++)
183 igt_spin_batch_free(fd, spin[n]);
184
Chris Wilsonbf1c7ee2017-09-22 10:46:02 +0100185}
186
/*
 * Two equal-priority stores from the default context must retire in
 * submission order: the second store (value 2) is the final result.
 */
static void fifo(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;

	scratch = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Same priority, same timeline, final result will be the second eb */
	store_dword(fd, 0, ring, scratch, 0, 1, cork.handle, 0);
	store_dword(fd, 0, ring, scratch, 0, 2, cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	igt_assert_eq_u32(ptr[0], 2);
	munmap(ptr, 4096);
}
211
/*
 * Queue a low-priority then a high-priority store to the same dword
 * while corked.  With distinct priorities, HI runs first so LO's value
 * lands last; with EQUAL priorities FIFO order holds and HI's value
 * (submitted second) is the final one.
 */
static void reorder(int fd, unsigned ring, unsigned flags)
#define EQUAL 1
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);

	scratch = gem_create(fd, 4096);
	plug(fd, &cork);

	/* We expect the high priority context to be executed first, and
	 * so the final result will be value from the low priority context.
	 */
	store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], cork.handle, 0);
	store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	if (flags & EQUAL) /* equal priority, result will be fifo */
		igt_assert_eq_u32(ptr[0], ctx[HI]);
	else
		igt_assert_eq_u32(ptr[0], ctx[LO]);
	munmap(ptr, 4096);
}
251
/*
 * Priority inheritance: HI depends on LO (via the shared "dep" buffer),
 * so LO must be promoted and execute before the medium-priority NOISE
 * despite LO's lower base priority.
 */
static void promotion(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t result, dep;
	uint32_t *ptr;
	uint32_t ctx[3];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], 0);

	ctx[NOISE] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[NOISE], MIN_PRIO/2);

	result = gem_create(fd, 4096);
	dep = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
	 *
	 * fifo would be NOISE, LO, HI.
	 * strict priority would be HI, NOISE, LO
	 */
	store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], cork.handle, 0);
	store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], cork.handle, 0);

	/* link LO <-> HI via a dependency on another buffer */
	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);

	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	/* dep: HI's write must land after LO's, i.e. HI ran after LO */
	ptr = gem_mmap__gtt(fd, dep, 4096, PROT_READ);
	gem_set_domain(fd, dep, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, dep);

	igt_assert_eq_u32(ptr[0], ctx[HI]);
	munmap(ptr, 4096);

	/* result: NOISE must have written last, after the promoted LO */
	ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	igt_assert_eq_u32(ptr[0], ctx[NOISE]);
	munmap(ptr, 4096);
}
309
#define NEW_CTX 0x1
/*
 * Submit a MAX_PRIO store behind a MIN_PRIO spinner, 16 times over, and
 * require each store to complete while the first spinner is still
 * running (i.e. the spinner was preempted, not finished).  With NEW_CTX
 * the low-priority context is recreated every iteration.
 */
static void preempt(int fd, unsigned ring, unsigned flags)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	for (int n = 0; n < 16; n++) {
		if (flags & NEW_CTX) {
			gem_context_destroy(fd, ctx[LO]);
			ctx[LO] = gem_context_create(fd);
			ctx_set_priority(fd, ctx[LO], MIN_PRIO);
		}
		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);

		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);

		/* Wait for the store; the oldest spinner must still be busy. */
		gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
		igt_assert_eq_u32(ptr[0], n + 1);
		igt_assert(gem_bo_busy(fd, spin[0]->handle));
	}

	for (int n = 0; n < 16; n++)
		igt_spin_batch_free(fd, spin[n]);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
349
/*
 * Check that preemption can cross engines: a single [HIGH] write on the
 * target ring must pull its dependent [LOW] writes ahead of a [NOISE]
 * spinner on every engine.
 */
static void preempt_other(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [LOW] write
	 *
	 * Then on our target engine do a [HIGH] write which should then
	 * prompt its dependent LOW writes in front of the spinner on
	 * each engine. The purpose of this test is to check that preemption
	 * can cross engines.
	 */

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	n = 0;
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[LO], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	/* Serialise with all the GPU writes into result. */
	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	/* The spinners must not have completed, merely been preempted. */
	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* ptr[0] is never written and stays 0; slots 1..n+1 hold their index */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
407
/*
 * As preempt_other, but the low-priority writes come from the same
 * context that is later raised to MAX_PRIO: the bump must preempt the
 * context's own earlier work past the noise spinners on every engine.
 */
static void preempt_self(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [self/LOW] write
	 *
	 * Then on our target engine do a [self/HIGH] write which should then
	 * preempt its own lower priority task on any engine.
	 */

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);

	n = 0;
	ctx_set_priority(fd, ctx[HI], MIN_PRIO);
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[HI], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	/* Raise the context's priority only after queuing the LOW writes. */
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	/* Serialise with all the GPU writes into result. */
	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	/* The spinners must not have completed, merely been preempted. */
	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* ptr[0] is never written and stays 0; slots 1..n+1 hold their index */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
460
/*
 * Build a deep dependency fan across one context per priority level:
 * every context writes all XS dep buffers (corked), then writes the
 * result buffer with read dependencies on the deps.  The shared deps
 * chain everything together, so priority inheritance must keep the
 * whole lot in submission order.
 */
static void deep(int fd, unsigned ring)
{
#define XS 8
	const unsigned int nctx = MAX_PRIO + 1;
	const unsigned size = ALIGN(4*nctx, 4096);
	struct cork cork;
	uint32_t result, dep[XS];
	uint32_t *ptr;
	uint32_t *ctx;

	/* One context per priority step, ascending up to MAX_PRIO. */
	ctx = malloc(sizeof(*ctx) * nctx);
	for (int n = 0; n < nctx; n++) {
		ctx[n] = gem_context_create(fd);
		ctx_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
	}

	result = gem_create(fd, size);
	for (int m = 0; m < XS; m ++)
		dep[m] = gem_create(fd, size);

	plug(fd, &cork);

	/* Create a deep dependency chain, with a few branches */
	for (int n = 0; n < nctx; n++)
		for (int m = 0; m < XS; m++)
			store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);

	for (int n = 0; n < nctx; n++) {
		for (int m = 0; m < XS; m++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
			store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}

	unplug_show_queue(fd, &cork, ring);

	for (int n = 0; n < nctx; n++)
		gem_context_destroy(fd, ctx[n]);

	/* Each context owns its slot n of every dep buffer. */
	for (int m = 0; m < XS; m++) {
		ptr = gem_mmap__gtt(fd, dep[m], size, PROT_READ);
		gem_set_domain(fd, dep[m], /* no write hazard lies! */
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		gem_close(fd, dep[m]);

		for (int n = 0; n < nctx; n++)
			igt_assert_eq_u32(ptr[n], ctx[n]);
		munmap(ptr, size);
	}

	ptr = gem_mmap__gtt(fd, result, size, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	/* No reordering due to PI on all contexts because of the common dep */
	for (int m = 0; m < XS; m++)
		igt_assert_eq_u32(ptr[m], ctx[nctx - 1]);
	munmap(ptr, size);

	free(ctx);
#undef XS
}
524
/*
 * Deliberately empty: installed for SIGALRM so the alarm interrupts a
 * blocking execbuf ioctl (making it fail with EINTR) instead of the
 * default action killing the process.
 */
static void alarm_handler(int sig)
{
}
528
529static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
530{
Chris Wilson976ed7c2017-08-24 12:16:17 +0100531 int err = 0;
532 if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
533 err = -errno;
534 return err;
Chris Wilsona19ef052017-07-31 14:15:59 +0100535}
536
/*
 * Estimate how many requests fit on @ring: behind a cork, keep
 * submitting no-op batches from a throwaway context while a repeating
 * 100us alarm interrupts any blocking execbuf; stop once no further
 * batch was accepted between two alarms and return the count.
 */
static unsigned int measure_ring_size(int fd, unsigned int ring)
{
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	unsigned int count, last;
	struct itimerval itv;
	struct cork c;

	memset(obj, 0, sizeof(obj));
	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	/* Warm up: prove the no-op batch executes before corking. */
	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj + 1);
	execbuf.buffer_count = 1;
	execbuf.flags = ring;
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.rsvd1 = gem_context_create(fd);

	/* Arm a repeating alarm to break out of a blocking execbuf. */
	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		/* No progress since the previous interruption: ring full. */
		if (last == count)
			break;

		last = count;
	} while (1);

	/* Disarm the timer before anything else can be interrupted. */
	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	unplug(&c);
	gem_close(fd, obj[1].handle);
	gem_context_destroy(fd, execbuf.rsvd1);

	return count;
}
595
/*
 * Queue one store from each of NCTX contexts per pass, repeating while
 * corked (bounded by the measured ring size and a 5s budget), then
 * check every context's slot holds that context's id.
 */
static void wide(int fd, unsigned ring)
{
#define NCTX 4096
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);

	struct cork cork;
	uint32_t result;
	uint32_t *ptr;
	uint32_t *ctx;
	unsigned int count;

	ctx = malloc(sizeof(*ctx)*NCTX);
	for (int n = 0; n < NCTX; n++)
		ctx[n] = gem_context_create(fd);

	/* One dword slot per context. */
	result = gem_create(fd, 4*NCTX);

	plug(fd, &cork);

	/* Lots of in-order requests, plugged and submitted simultaneously */
	for (count = 0;
	     igt_seconds_elapsed(&tv) < 5 && count < ring_size;
	     count++) {
		for (int n = 0; n < NCTX; n++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}
	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
		 count, NCTX, igt_nsec_elapsed(&tv) * 1e-6);

	unplug_show_queue(fd, &cork, ring);

	for (int n = 0; n < NCTX; n++)
		gem_context_destroy(fd, ctx[n]);

	ptr = gem_mmap__gtt(fd, result, 4*NCTX, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < NCTX; n++)
		igt_assert_eq_u32(ptr[n], ctx[n]);
	munmap(ptr, 4*NCTX);

	gem_close(fd, result);
	free(ctx);
#undef NCTX
}
643
/*
 * Submit, while corked, a batch-load of random-slot stores from one
 * context per priority level (ascending MIN_PRIO..MAX_PRIO, max 5s).
 * Once released, execution is expected in priority order, so for each
 * slot the lowest-priority (first-submitted) store lands last: the
 * final value must equal the first value recorded for that slot.
 */
static void reorder_wide(int fd, unsigned ring)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);
	struct cork cork;
	uint32_t result, target;
	uint32_t *found, *expected;

	result = gem_create(fd, 4096);
	target = gem_create(fd, 4096);

	plug(fd, &cork);

	/* target holds the CPU-maintained model of the expected outcome. */
	expected = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork.handle;
	obj[1].handle = result;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = result;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = 0; /* lies */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 3;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	/* One fresh context per priority level, tagged with value x. */
	for (int n = MIN_PRIO, x = 1;
	     igt_seconds_elapsed(&tv) < 5 && n <= MAX_PRIO;
	     n++, x++) {
		unsigned int sz = ALIGN(ring_size * 64, 4096);
		uint32_t *batch;

		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, n);

		/* All stores for this priority share one bo, 64 bytes apart. */
		obj[2].handle = gem_create(fd, sz);
		batch = gem_mmap__gtt(fd, obj[2].handle, sz, PROT_WRITE);
		gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

		for (int m = 0; m < ring_size; m++) {
			uint64_t addr;
			int idx = hars_petruska_f54_1_random_unsafe_max(1024);
			int i;

			execbuf.batch_start_offset = m * 64;
			reloc.offset = execbuf.batch_start_offset + sizeof(uint32_t);
			reloc.delta = idx * sizeof(uint32_t);
			addr = reloc.presumed_offset + reloc.delta;

			/* MI_STORE_DWORD_IMM of x into result[idx] (gen-specific). */
			i = execbuf.batch_start_offset / sizeof(uint32_t);
			batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
			if (gen >= 8) {
				batch[++i] = addr;
				batch[++i] = addr >> 32;
			} else if (gen >= 4) {
				batch[++i] = 0;
				batch[++i] = addr;
				reloc.offset += sizeof(uint32_t);
			} else {
				batch[i]--;
				batch[++i] = addr;
			}
			batch[++i] = x;
			batch[++i] = MI_BATCH_BUFFER_END;

			/* Record only the first writer per slot. */
			if (!expected[idx])
				expected[idx] = x;

			gem_execbuf(fd, &execbuf);
		}

		munmap(batch, sz);
		gem_close(fd, obj[2].handle);
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	unplug_show_queue(fd, &cork, ring);

	found = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < 1024; n++)
		igt_assert_eq_u32(found[n], expected[n]);
	munmap(found, 4096);
	munmap(expected, 4096);

	gem_close(fd, result);
	gem_close(fd, target);
}
745
Chris Wilson976ed7c2017-08-24 12:16:17 +0100746static void bind_to_cpu(int cpu)
747{
748 const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
749 struct sched_param rt = {.sched_priority = 99 };
750 cpu_set_t allowed;
751
752 igt_assert(sched_setscheduler(getpid(), SCHED_RR | SCHED_RESET_ON_FORK, &rt) == 0);
753
754 CPU_ZERO(&allowed);
755 CPU_SET(cpu % ncpus, &allowed);
756 igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
757}
758
/*
 * Priority-inversion check on ring space: fill a MIN_PRIO context's
 * ring to the brim, then have a child submit with a MAX_PRIO context.
 * The high-priority submission must succeed within its 10ms alarm even
 * while the low-priority parent is blocked waiting for ring space.
 */
static void test_pi_ringfull(int fd, unsigned int engine)
{
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	unsigned int last, count;
	struct itimerval itv;
	struct cork c;
	bool *result;

	/* Shared page for the forked child to report progress. */
	result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	igt_assert(result != MAP_FAILED);

	memset(&execbuf, 0, sizeof(execbuf));
	memset(&obj, 0, sizeof(obj));

	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;
	execbuf.flags = engine;
	execbuf.rsvd1 = gem_context_create(fd);
	ctx_set_priority(fd, execbuf.rsvd1, MIN_PRIO);

	/* Warm up: prove the no-op batch runs before corking. */
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	/* Fill the low-priority ring */
	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;

	/* Repeating 100us alarm so a blocked execbuf returns -EINTR. */
	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		/* No progress between two alarms: the ring is full. */
		if (last == count)
			break;

		last = count;
	} while (1);
	igt_debug("Filled low-priority ring with %d batches\n", count);

	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;

	/* both parent + child on the same cpu, only parent is RT */
	bind_to_cpu(0);

	igt_fork(child, 1) {
		result[0] = true;

		igt_debug("Creating HP context\n");
		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, MAX_PRIO);

		/* Wake the parent, then wait to be scheduled again. */
		kill(getppid(), SIGALRM);
		sched_yield();
		result[1] = true;

		/* Give ourselves 10ms to get the submission through. */
		itv.it_value.tv_sec = 0;
		itv.it_value.tv_usec = 10000;
		setitimer(ITIMER_REAL, &itv, NULL);

		/* Since we are the high priority task, we expect to be
		 * able to add ourselves to *our* ring without interruption.
		 */
		igt_debug("HP child executing\n");
		result[2] = __execbuf(fd, &execbuf) == 0;
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	/* Relinquish CPU just to allow child to create a context */
	sleep(1);
	igt_assert_f(result[0], "HP context (child) not created");
	igt_assert_f(!result[1], "Child released too early!\n");

	/* Parent sleeps waiting for ringspace, releasing child */
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 50000;
	setitimer(ITIMER_REAL, &itv, NULL);
	igt_debug("LP parent executing\n");
	igt_assert_eq(__execbuf(fd, &execbuf), -EINTR);
	igt_assert_f(result[1], "Child was not released!\n");
	igt_assert_f(result[2],
		     "High priority child unable to submit within 10ms\n");

	unplug(&c);
	igt_waitchildren();

	gem_context_destroy(fd, execbuf.rsvd1);
	gem_close(fd, obj[1].handle);
	gem_close(fd, obj[0].handle);
	munmap(result, 4096);
}
872
Chris Wilson721d8742016-10-27 11:32:47 +0100873static bool has_scheduler(int fd)
874{
875 drm_i915_getparam_t gp;
876 int has = -1;
877
878 gp.param = LOCAL_PARAM_HAS_SCHEDULER;
879 gp.value = &has;
880 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
881
882 return has > 0;
883}
884
#define HAVE_EXECLISTS 0x1
#define HAVE_GUC 0x2
/* Report which submission backend the driver is using and return it as
 * a HAVE_* flag mask (0 if the module parameters are unreadable).
 */
static unsigned print_welcome(int fd)
{
	unsigned flags = 0;
	int params;

	params = igt_sysfs_open_parameters(fd);
	if (params < 0)
		return 0;

	if (igt_sysfs_get_boolean(params, "enable_guc_submission")) {
		igt_info("Using GuC submission\n");
		flags = HAVE_GUC | HAVE_EXECLISTS;
	} else if (igt_sysfs_get_boolean(params, "enable_execlists")) {
		igt_info("Using Execlists submission\n");
		flags = HAVE_EXECLISTS;
	} else {
		/* Legacy ringbuffer submission; semaphores are optional. */
		igt_info("Using Legacy submission%s\n",
			 igt_sysfs_get_boolean(params, "semaphores") ?
			 ", with semaphores" : "");
	}

	close(params);
	return flags;
}
919
Chris Wilson721d8742016-10-27 11:32:47 +0100920igt_main
921{
922 const struct intel_execution_engine *e;
Chris Wilson976ed7c2017-08-24 12:16:17 +0100923 unsigned int caps = 0;
Chris Wilson721d8742016-10-27 11:32:47 +0100924 int fd = -1;
925
926 igt_skip_on_simulation();
927
928 igt_fixture {
929 fd = drm_open_driver_master(DRIVER_INTEL);
Chris Wilson976ed7c2017-08-24 12:16:17 +0100930 caps = print_welcome(fd);
Chris Wilson9518cb52017-02-22 15:24:54 +0000931 igt_require_gem(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100932 gem_require_mmap_wc(fd);
933 igt_fork_hang_detector(fd);
934 }
935
936 igt_subtest_group {
937 for (e = intel_execution_engines; e->name; e++) {
938 /* default exec-id is purely symbolic */
939 if (e->exec_id == 0)
940 continue;
941
942 igt_subtest_f("fifo-%s", e->name) {
943 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100944 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100945 fifo(fd, e->exec_id | e->flags);
946 }
947 }
948 }
949
950 igt_subtest_group {
951 igt_fixture {
952 igt_require(has_scheduler(fd));
Chris Wilsonaf0e1c52017-02-21 18:25:58 +0000953 ctx_has_priority(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100954 }
955
956 for (e = intel_execution_engines; e->name; e++) {
957 /* default exec-id is purely symbolic */
958 if (e->exec_id == 0)
959 continue;
960
961 igt_subtest_group {
Chris Wilson073cfd72017-03-17 11:52:51 +0000962 igt_fixture {
Chris Wilson721d8742016-10-27 11:32:47 +0100963 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100964 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson073cfd72017-03-17 11:52:51 +0000965 }
Chris Wilson721d8742016-10-27 11:32:47 +0100966
967 igt_subtest_f("in-order-%s", e->name)
968 reorder(fd, e->exec_id | e->flags, EQUAL);
969
970 igt_subtest_f("out-order-%s", e->name)
971 reorder(fd, e->exec_id | e->flags, 0);
972
973 igt_subtest_f("promotion-%s", e->name)
974 promotion(fd, e->exec_id | e->flags);
975
Chris Wilsona3801342017-07-16 16:28:41 +0100976 igt_subtest_f("preempt-%s", e->name)
977 preempt(fd, e->exec_id | e->flags, 0);
978
979 igt_subtest_f("preempt-contexts-%s", e->name)
980 preempt(fd, e->exec_id | e->flags, NEW_CTX);
981
982 igt_subtest_f("preempt-other-%s", e->name)
983 preempt_other(fd, e->exec_id | e->flags);
984
985 igt_subtest_f("preempt-self-%s", e->name)
986 preempt_self(fd, e->exec_id | e->flags);
987
Chris Wilson721d8742016-10-27 11:32:47 +0100988 igt_subtest_f("deep-%s", e->name)
989 deep(fd, e->exec_id | e->flags);
Chris Wilsonf6920752017-04-24 13:20:04 +0100990
991 igt_subtest_f("wide-%s", e->name)
992 wide(fd, e->exec_id | e->flags);
Chris Wilson61f8de72017-07-20 10:08:28 +0100993
994 igt_subtest_f("reorder-wide-%s", e->name)
995 reorder_wide(fd, e->exec_id | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100996 }
997 }
998 }
999
Chris Wilson976ed7c2017-08-24 12:16:17 +01001000 igt_subtest_group {
1001 igt_fixture {
1002 igt_require(has_scheduler(fd));
1003 ctx_has_priority(fd);
1004
1005 /* need separate rings */
1006 igt_require(caps & HAVE_EXECLISTS);
1007 }
1008
1009 for (e = intel_execution_engines; e->name; e++) {
1010 igt_subtest_group {
1011 igt_fixture {
1012 gem_require_ring(fd, e->exec_id | e->flags);
1013 }
1014
1015 igt_subtest_f("pi-ringfull-%s", e->name)
1016 test_pi_ringfull(fd, e->exec_id | e->flags);
1017 }
1018 }
1019 }
1020
Chris Wilson721d8742016-10-27 11:32:47 +01001021 igt_fixture {
1022 igt_stop_hang_detector();
1023 close(fd);
1024 }
1025}