/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "config.h"

#include <sys/poll.h>
#include <sys/ioctl.h>
#include <sched.h>
#include <signal.h>

#include "igt.h"
#include "igt_vgem.h"
#include "igt_rand.h"
#include "igt_sysfs.h"

#define BIT(x) (1ul << (x))

#define LOCAL_PARAM_HAS_SCHEDULER 41
#define HAS_SCHEDULER BIT(0)
#define HAS_PRIORITY BIT(1)
#define HAS_PREEMPTION BIT(2)
#define LOCAL_CONTEXT_PARAM_PRIORITY 6

#define LO 0
#define HI 1
#define NOISE 2

#define MAX_PRIO 1023
#define MIN_PRIO -1023

#define BUSY_QLEN 8

IGT_TEST_DESCRIPTION("Check that we can control the order of execution");

static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
{
	struct local_i915_gem_context_param param;

	memset(&param, 0, sizeof(param));
	param.context = ctx;
	param.size = 0;
	param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
	param.value = prio;

	return __gem_context_set_param(fd, &param);
}

static void ctx_set_priority(int fd, uint32_t ctx, int prio)
{
	igt_assert_eq(__ctx_set_priority(fd, ctx, prio), 0);
}

static void ctx_has_priority(int fd)
{
	igt_require(__ctx_set_priority(fd, 0, MAX_PRIO) == 0);
}

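/*
 * Submit a small batch on the chosen ring/context that stores a single
 * dword (value) into target at the given offset. If cork is non-zero,
 * the cork bo is placed first in the execobject list so the batch is
 * held back until the cork fence is signalled; write_domain lets the
 * caller mark the store as a write for dependency tracking.
 */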
static void store_dword(int fd, uint32_t ctx, unsigned ring,
			uint32_t target, uint32_t offset, uint32_t value,
			uint32_t cork, unsigned write_domain)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[16];
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj + !cork);
	execbuf.buffer_count = 2 + !!cork;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;
	execbuf.rsvd1 = ctx;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork;
	obj[1].handle = target;
	obj[2].handle = gem_create(fd, 4096);

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[1].handle;
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = write_domain;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	i = 0;
	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[++i] = offset;
		batch[++i] = 0;
	} else if (gen >= 4) {
		batch[++i] = 0;
		batch[++i] = offset;
		reloc.offset += sizeof(uint32_t);
	} else {
		batch[i]--;
		batch[++i] = offset;
	}
	batch[++i] = value;
	batch[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[2].handle);
}

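/*
 * A cork is a vgem bo with an unsignalled fence attached, imported into
 * i915 via dma-buf. Any batch referencing the imported handle is blocked
 * until the fence is signalled, so we can queue up work on the scheduler
 * without any of it executing yet.
 */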
struct cork {
	int device;
	uint32_t handle;
	uint32_t fence;
};

static void plug(int fd, struct cork *c)
{
	struct vgem_bo bo;
	int dmabuf;

	c->device = drm_open_driver(DRIVER_VGEM);

	bo.width = bo.height = 1;
	bo.bpp = 4;
	vgem_create(c->device, &bo);
	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);

	dmabuf = prime_handle_to_fd(c->device, bo.handle);
	c->handle = prime_fd_to_handle(fd, dmabuf);
	close(dmabuf);
}

static void unplug(struct cork *c)
{
	vgem_fence_signal(c->device, c->fence);
	close(c->device);
}

static uint32_t create_highest_priority(int fd)
{
	uint32_t ctx = gem_context_create(fd);

	/*
	 * If there is no priority support, all contexts will have equal
	 * priority (and therefore the max user priority), so no context
	 * can overtake us, and we can effectively form a plug.
	 */
	__ctx_set_priority(fd, ctx, MAX_PRIO);

	return ctx;
}

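/*
 * Keep the engine busy with a queue of maximum priority spinners while
 * the cork is released, so the scheduler sees all the corked requests
 * queued at once; dump the engine state for debugging before cleaning
 * up the spinners.
 */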
static void unplug_show_queue(int fd, struct cork *c, unsigned int engine)
{
	igt_spin_t *spin[BUSY_QLEN];

	for (int n = 0; n < ARRAY_SIZE(spin); n++) {
		uint32_t ctx = create_highest_priority(fd);
		spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
		gem_context_destroy(fd, ctx);
	}

	unplug(c); /* batches will now be queued on the engine */
	igt_debugfs_dump(fd, "i915_engine_info");

	for (int n = 0; n < ARRAY_SIZE(spin); n++)
		igt_spin_batch_free(fd, spin[n]);
}

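/* With equal priority on a single timeline, execution must remain FIFO. */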
static void fifo(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;

	scratch = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Same priority, same timeline, final result will be the second eb */
	store_dword(fd, 0, ring, scratch, 0, 1, cork.handle, 0);
	store_dword(fd, 0, ring, scratch, 0, 2, cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	igt_assert_eq_u32(ptr[0], 2);
	munmap(ptr, 4096);
}

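/*
 * Skip the symbolic default engine (0), and the I915_EXEC_BSD alias when
 * two BSD rings exist, since both are covered via their explicit flags.
 */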
static bool ignore_engine(int fd, unsigned engine)
{
	if (engine == 0)
		return true;

	if (gem_has_bsd2(fd) && engine == I915_EXEC_BSD)
		return true;

	return false;
}

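/*
 * Stress the scheduler: fork a writer per CPU, each hammering random
 * engines from a context whose priority is rerolled before every batch.
 */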
static void smoketest(int fd, unsigned ring, unsigned timeout)
{
	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	unsigned engines[16];
	unsigned nengine;
	unsigned engine;
	uint32_t scratch;
	uint32_t *ptr;

	nengine = 0;
	for_each_engine(fd, engine) {
		if (ignore_engine(fd, engine))
			continue;

		engines[nengine++] = engine;
	}
	igt_require(nengine);

	scratch = gem_create(fd, 4096);
	igt_fork(child, ncpus) {
		unsigned long count = 0;
		uint32_t ctx;

		hars_petruska_f54_1_random_perturb(child);

		ctx = gem_context_create(fd);
		igt_until_timeout(timeout) {
			int prio;

			prio = hars_petruska_f54_1_random_unsafe_max(MAX_PRIO - MIN_PRIO) + MIN_PRIO;
			ctx_set_priority(fd, ctx, prio);

			engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
			store_dword(fd, ctx, engine, scratch,
				    8*child + 0, ~child,
				    0, 0);
			for (unsigned int step = 0; step < 8; step++)
				store_dword(fd, ctx, engine, scratch,
					    8*child + 4, count++,
					    0, 0);
		}
		gem_context_destroy(fd, ctx);
	}
	igt_waitchildren();

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	for (unsigned n = 0; n < ncpus; n++) {
		igt_assert_eq_u32(ptr[2*n], ~n);
		/*
		 * Note this count is approximate due to unconstrained
		 * ordering of the dword writes between engines.
		 *
		 * Take the result with a pinch of salt.
		 */
		igt_info("Child[%d] completed %u cycles\n", n, ptr[2*n+1]);
	}
	munmap(ptr, 4096);
}

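/*
 * Queue a low and a high priority store to the same dword while corked
 * and check which value survives, i.e. which request executed last.
 */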
static void reorder(int fd, unsigned ring, unsigned flags)
#define EQUAL 1
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *ptr;
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);

	scratch = gem_create(fd, 4096);
	plug(fd, &cork);

	/* We expect the high priority context to be executed first, and
	 * so the final result will be the value from the low priority context.
	 */
	store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], cork.handle, 0);
	store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], cork.handle, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	if (flags & EQUAL) /* equal priority, result will be fifo */
		igt_assert_eq_u32(ptr[0], ctx[HI]);
	else
		igt_assert_eq_u32(ptr[0], ctx[LO]);
	munmap(ptr, 4096);
}

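/*
 * Check priority inheritance: a high priority request that depends on
 * low priority work should promote that work ahead of other requests
 * of intermediate priority.
 */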
static void promotion(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t result, dep;
	uint32_t *ptr;
	uint32_t ctx[3];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], 0);

	ctx[NOISE] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[NOISE], MIN_PRIO/2);

	result = gem_create(fd, 4096);
	dep = gem_create(fd, 4096);

	plug(fd, &cork);

	/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
	 *
	 * fifo would be NOISE, LO, HI.
	 * strict priority would be HI, NOISE, LO.
	 */
	store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], cork.handle, 0);
	store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], cork.handle, 0);

	/* link LO <-> HI via a dependency on another buffer */
	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);

	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);

	unplug_show_queue(fd, &cork, ring);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, dep, 4096, PROT_READ);
	gem_set_domain(fd, dep, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, dep);

	igt_assert_eq_u32(ptr[0], ctx[HI]);
	munmap(ptr, 4096);

	ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	igt_assert_eq_u32(ptr[0], ctx[NOISE]);
	munmap(ptr, 4096);
}

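/*
 * Repeatedly start a low priority spinner and then issue a high priority
 * store; the store completing while the spinner still runs shows the
 * scheduler preempted the active batch.
 */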
#define NEW_CTX 0x1
static void preempt(int fd, unsigned ring, unsigned flags)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	for (int n = 0; n < 16; n++) {
		if (flags & NEW_CTX) {
			gem_context_destroy(fd, ctx[LO]);
			ctx[LO] = gem_context_create(fd);
			ctx_set_priority(fd, ctx[LO], MIN_PRIO);
		}
		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);

		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);

		gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
		igt_assert_eq_u32(ptr[0], n + 1);
		igt_assert(gem_bo_busy(fd, spin[0]->handle));
	}

	for (int n = 0; n < 16; n++)
		igt_spin_batch_free(fd, spin[n]);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}

static void preempt_other(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [LOW] write
	 *
	 * Then on our target engine do a [HIGH] write which should then
	 * promote its dependent LOW writes in front of the spinner on
	 * each engine. The purpose of this test is to check that preemption
	 * can cross engines.
	 */

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], MIN_PRIO);

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	n = 0;
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[LO], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}

static void preempt_self(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [self/LOW] write
	 *
	 * Then on our target engine do a [self/HIGH] write which should then
	 * preempt its own lower priority task on any engine.
	 */

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);

	n = 0;
	ctx_set_priority(fd, ctx[HI], MIN_PRIO);
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[HI], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}

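/*
 * Build a deep dependency chain spanning the full priority range, with
 * branches through the dep[] buffers; the shared dependency should
 * defeat any priority reordering of the final writes.
 */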
static void deep(int fd, unsigned ring)
{
#define XS 8
	const unsigned int nctx = MAX_PRIO - MIN_PRIO;
	const unsigned size = ALIGN(4*nctx, 4096);
	struct timespec tv = {};
	struct cork cork;
	uint32_t result, dep[XS];
	uint32_t expected = 0;
	uint32_t *ptr;
	uint32_t *ctx;

	ctx = malloc(sizeof(*ctx) * nctx);
	for (int n = 0; n < nctx; n++) {
		ctx[n] = gem_context_create(fd);
		ctx_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
	}

	result = gem_create(fd, size);
	for (int m = 0; m < XS; m++)
		dep[m] = gem_create(fd, size);

	/* Bind all surfaces and contexts before starting the timeout. */
	{
		struct drm_i915_gem_exec_object2 obj[XS + 2];
		struct drm_i915_gem_execbuffer2 execbuf;
		const uint32_t bbe = MI_BATCH_BUFFER_END;

		memset(obj, 0, sizeof(obj));
		for (int n = 0; n < XS; n++)
			obj[n].handle = dep[n];
		obj[XS].handle = result;
		obj[XS+1].handle = gem_create(fd, 4096);
		gem_write(fd, obj[XS+1].handle, 0, &bbe, sizeof(bbe));

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = to_user_pointer(obj);
		execbuf.buffer_count = XS + 2;
		execbuf.flags = ring;
		for (int n = 0; n < nctx; n++) {
			execbuf.rsvd1 = ctx[n];
			gem_execbuf(fd, &execbuf);
		}
		gem_close(fd, obj[XS+1].handle);
		gem_sync(fd, result);
	}

	plug(fd, &cork);

	/* Create a deep dependency chain, with a few branches */
	for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
		for (int m = 0; m < XS; m++)
			store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);

	for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
		for (int m = 0; m < XS; m++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
			store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
		}
		expected = ctx[n];
	}

	unplug_show_queue(fd, &cork, ring);
	igt_require(expected); /* too slow */

	for (int n = 0; n < nctx; n++)
		gem_context_destroy(fd, ctx[n]);

	for (int m = 0; m < XS; m++) {
		ptr = gem_mmap__gtt(fd, dep[m], size, PROT_READ);
		gem_set_domain(fd, dep[m], /* no write hazard lies! */
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		gem_close(fd, dep[m]);

		for (int n = 0; n < nctx; n++)
			igt_assert_eq_u32(ptr[n], ctx[n]);
		munmap(ptr, size);
	}

	ptr = gem_mmap__gtt(fd, result, size, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	/* No reordering due to PI on all contexts because of the common dep */
	for (int m = 0; m < XS; m++)
		igt_assert_eq_u32(ptr[m], expected);
	munmap(ptr, size);

	free(ctx);
#undef XS
}

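/* Empty handler: SIGALRM is only used to interrupt a blocked ioctl. */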
static void alarm_handler(int sig)
{
}

static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	int err = 0;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
		err = -errno;
	return err;
}

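/*
 * Estimate the ring size by queueing nop batches behind a cork until
 * execbuf blocks; the interval timer interrupts the blocked ioctl with
 * -EINTR, and the count has converged once no more batches fit between
 * consecutive alarms.
 */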
static unsigned int measure_ring_size(int fd, unsigned int ring)
{
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	unsigned int count, last;
	struct itimerval itv;
	struct cork c;

	memset(obj, 0, sizeof(obj));
	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj + 1);
	execbuf.buffer_count = 1;
	execbuf.flags = ring;
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;
	execbuf.rsvd1 = gem_context_create(fd);

	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		if (last == count)
			break;

		last = count;
	} while (1);

	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	unplug(&c);
	gem_close(fd, obj[1].handle);
	gem_context_destroy(fd, execbuf.rsvd1);

	return count;
}

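/*
 * Submit corked batches from thousands of contexts at once, then check
 * that every context's store still lands in its own slot.
 */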
static void wide(int fd, unsigned ring)
{
#define NCTX 4096
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);

	struct cork cork;
	uint32_t result;
	uint32_t *ptr;
	uint32_t *ctx;
	unsigned int count;

	ctx = malloc(sizeof(*ctx)*NCTX);
	for (int n = 0; n < NCTX; n++)
		ctx[n] = gem_context_create(fd);

	result = gem_create(fd, 4*NCTX);

	plug(fd, &cork);

	/* Lots of in-order requests, plugged and submitted simultaneously */
	for (count = 0;
	     igt_seconds_elapsed(&tv) < 5 && count < ring_size;
	     count++) {
		for (int n = 0; n < NCTX; n++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}
	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
		 count, NCTX, igt_nsec_elapsed(&tv) * 1e-6);

	unplug_show_queue(fd, &cork, ring);

	for (int n = 0; n < NCTX; n++)
		gem_context_destroy(fd, ctx[n]);

	ptr = gem_mmap__gtt(fd, result, 4*NCTX, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < NCTX; n++)
		igt_assert_eq_u32(ptr[n], ctx[n]);
	munmap(ptr, 4*NCTX);

	gem_close(fd, result);
	free(ctx);
#undef NCTX
}

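/*
 * Scribble into a shared scratch from contexts across the priority
 * range. Higher priority runs first, so the last write to each dword
 * comes from the lowest priority context that touched it, which is
 * recorded in expected[] at submission time.
 */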
static void reorder_wide(int fd, unsigned ring)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);
	struct cork cork;
	uint32_t result, target;
	uint32_t *found, *expected;

	result = gem_create(fd, 4096);
	target = gem_create(fd, 4096);

	plug(fd, &cork);

	expected = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork.handle;
	obj[1].handle = result;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = result;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = 0; /* lies */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 3;
	execbuf.flags = ring;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	for (int n = MIN_PRIO, x = 1;
	     igt_seconds_elapsed(&tv) < 5 && n <= MAX_PRIO;
	     n++, x++) {
		unsigned int sz = ALIGN(ring_size * 64, 4096);
		uint32_t *batch;

		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, n);

		obj[2].handle = gem_create(fd, sz);
		batch = gem_mmap__gtt(fd, obj[2].handle, sz, PROT_WRITE);
		gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

		for (int m = 0; m < ring_size; m++) {
			uint64_t addr;
			int idx = hars_petruska_f54_1_random_unsafe_max(1024);
			int i;

			execbuf.batch_start_offset = m * 64;
			reloc.offset = execbuf.batch_start_offset + sizeof(uint32_t);
			reloc.delta = idx * sizeof(uint32_t);
			addr = reloc.presumed_offset + reloc.delta;

			i = execbuf.batch_start_offset / sizeof(uint32_t);
			batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
			if (gen >= 8) {
				batch[++i] = addr;
				batch[++i] = addr >> 32;
			} else if (gen >= 4) {
				batch[++i] = 0;
				batch[++i] = addr;
				reloc.offset += sizeof(uint32_t);
			} else {
				batch[i]--;
				batch[++i] = addr;
			}
			batch[++i] = x;
			batch[++i] = MI_BATCH_BUFFER_END;

			if (!expected[idx])
				expected[idx] = x;

			gem_execbuf(fd, &execbuf);
		}

		munmap(batch, sz);
		gem_close(fd, obj[2].handle);
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	unplug_show_queue(fd, &cork, ring);

	found = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < 1024; n++)
		igt_assert_eq_u32(found[n], expected[n]);
	munmap(found, 4096);
	munmap(expected, 4096);

	gem_close(fd, result);
	gem_close(fd, target);
}

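/*
 * Pin ourselves to one CPU as a SCHED_RR realtime task; with
 * SCHED_RESET_ON_FORK, children revert to SCHED_OTHER and so only run
 * when the parent sleeps.
 */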
static void bind_to_cpu(int cpu)
{
	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	struct sched_param rt = {.sched_priority = 99 };
	cpu_set_t allowed;

	igt_assert(sched_setscheduler(getpid(), SCHED_RR | SCHED_RESET_ON_FORK, &rt) == 0);

	CPU_ZERO(&allowed);
	CPU_SET(cpu % ncpus, &allowed);
	igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
}

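/*
 * Fill a low priority context's ring to capacity, then check that a
 * CPU-starved high priority child can still submit to its own context
 * without being blocked behind the low priority hog.
 */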
static void test_pi_ringfull(int fd, unsigned int engine)
{
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[2];
	unsigned int last, count;
	struct itimerval itv;
	struct cork c;
	bool *result;

	result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	igt_assert(result != MAP_FAILED);

	memset(&execbuf, 0, sizeof(execbuf));
	memset(&obj, 0, sizeof(obj));

	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;
	execbuf.flags = engine;
	execbuf.rsvd1 = gem_context_create(fd);
	ctx_set_priority(fd, execbuf.rsvd1, MIN_PRIO);

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	/* Fill the low-priority ring */
	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;

	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		if (last == count)
			break;

		last = count;
	} while (1);
	igt_debug("Filled low-priority ring with %d batches\n", count);

	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
	execbuf.buffer_count = 1;

	/* both parent + child on the same cpu, only parent is RT */
	bind_to_cpu(0);

	igt_fork(child, 1) {
		result[0] = true;

		igt_debug("Creating HP context\n");
		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, MAX_PRIO);

		kill(getppid(), SIGALRM);
		sched_yield();
		result[1] = true;

		itv.it_value.tv_sec = 0;
		itv.it_value.tv_usec = 10000;
		setitimer(ITIMER_REAL, &itv, NULL);

		/* Since we are the high priority task, we expect to be
		 * able to add ourselves to *our* ring without interruption.
		 */
		igt_debug("HP child executing\n");
		result[2] = __execbuf(fd, &execbuf) == 0;
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	/* Relinquish CPU just to allow child to create a context */
	sleep(1);
	igt_assert_f(result[0], "HP context (child) not created\n");
	igt_assert_f(!result[1], "Child released too early!\n");

	/* Parent sleeps waiting for ringspace, releasing child */
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 50000;
	setitimer(ITIMER_REAL, &itv, NULL);
	igt_debug("LP parent executing\n");
	igt_assert_eq(__execbuf(fd, &execbuf), -EINTR);
	igt_assert_f(result[1], "Child was not released!\n");
	igt_assert_f(result[2],
		     "High priority child unable to submit within 10ms\n");

	unplug(&c);
	igt_waitchildren();

	gem_context_destroy(fd, execbuf.rsvd1);
	gem_close(fd, obj[1].handle);
	gem_close(fd, obj[0].handle);
	munmap(result, 4096);
}

static unsigned int has_scheduler(int fd)
{
	drm_i915_getparam_t gp;
	unsigned int caps = 0;

	gp.param = LOCAL_PARAM_HAS_SCHEDULER;
	gp.value = (int *)&caps;
	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	if (!caps)
		return 0;

	igt_info("Has kernel scheduler\n");
	if (caps & HAS_PRIORITY)
		igt_info(" - With priority sorting\n");
	if (caps & HAS_PREEMPTION)
		igt_info(" - With preemption enabled\n");

	return caps;
}

igt_main
{
	const struct intel_execution_engine *e;
	unsigned int sched_caps = 0;
	int fd = -1;

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_driver_master(DRIVER_INTEL);
		gem_show_submission_method(fd);
		sched_caps = has_scheduler(fd);
		igt_require_gem(fd);
		gem_require_mmap_wc(fd);
		igt_fork_hang_detector(fd);
	}

	igt_subtest_group {
		for (e = intel_execution_engines; e->name; e++) {
			/* default exec-id is purely symbolic */
			if (e->exec_id == 0)
				continue;

			igt_subtest_f("fifo-%s", e->name) {
				gem_require_ring(fd, e->exec_id | e->flags);
				igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
				fifo(fd, e->exec_id | e->flags);
			}
		}
	}

	igt_subtest_group {
		igt_fixture {
			igt_require(sched_caps & HAS_SCHEDULER);
			ctx_has_priority(fd);
		}

		igt_subtest("smoketest-all")
			smoketest(fd, -1, 30);

		for (e = intel_execution_engines; e->name; e++) {
			/* default exec-id is purely symbolic */
			if (e->exec_id == 0)
				continue;

			igt_subtest_group {
				igt_fixture {
					gem_require_ring(fd, e->exec_id | e->flags);
					igt_require(gem_can_store_dword(fd, e->exec_id | e->flags));
				}

				igt_subtest_f("in-order-%s", e->name)
					reorder(fd, e->exec_id | e->flags, EQUAL);

				igt_subtest_f("out-order-%s", e->name)
					reorder(fd, e->exec_id | e->flags, 0);

				igt_subtest_f("promotion-%s", e->name)
					promotion(fd, e->exec_id | e->flags);

				igt_subtest_group {
					igt_fixture {
						igt_require(sched_caps & HAS_PREEMPTION);
					}

					igt_subtest_f("preempt-%s", e->name)
						preempt(fd, e->exec_id | e->flags, 0);

					igt_subtest_f("preempt-contexts-%s", e->name)
						preempt(fd, e->exec_id | e->flags, NEW_CTX);

					igt_subtest_f("preempt-other-%s", e->name)
						preempt_other(fd, e->exec_id | e->flags);

					igt_subtest_f("preempt-self-%s", e->name)
						preempt_self(fd, e->exec_id | e->flags);
				}

				igt_subtest_f("deep-%s", e->name)
					deep(fd, e->exec_id | e->flags);

				igt_subtest_f("wide-%s", e->name)
					wide(fd, e->exec_id | e->flags);

				igt_subtest_f("reorder-wide-%s", e->name)
					reorder_wide(fd, e->exec_id | e->flags);

				igt_subtest_f("smoketest-%s", e->name)
					smoketest(fd, e->exec_id | e->flags, 5);
			}
		}
	}

	igt_subtest_group {
		igt_fixture {
			igt_require(sched_caps & HAS_SCHEDULER);
			ctx_has_priority(fd);

			/* need separate rings */
			igt_require(gem_has_execlists(fd));
		}

		for (e = intel_execution_engines; e->name; e++) {
			igt_subtest_group {
				igt_fixture {
					gem_require_ring(fd, e->exec_id | e->flags);
					igt_require(sched_caps & HAS_PREEMPTION);
				}

				igt_subtest_f("pi-ringfull-%s", e->name)
					test_pi_ringfull(fd, e->exec_id | e->flags);
			}
		}
	}

	igt_fixture {
		igt_stop_hang_detector();
		close(fd);
	}
}