/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <sys/poll.h>
Chris Wilsona19ef052017-07-31 14:15:59 +010025#include <sys/ioctl.h>
Chris Wilson721d8742016-10-27 11:32:47 +010026
27#include "igt.h"
28#include "igt_vgem.h"
Chris Wilson61f8de72017-07-20 10:08:28 +010029#include "igt_rand.h"
Chris Wilson721d8742016-10-27 11:32:47 +010030
Chris Wilson49f44c72016-11-14 21:24:52 +000031#define LOCAL_PARAM_HAS_SCHEDULER 41
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000032#define LOCAL_CONTEXT_PARAM_PRIORITY 6
Chris Wilson721d8742016-10-27 11:32:47 +010033
34#define LO 0
35#define HI 1
36#define NOISE 2
37
38#define MAX_PRIO 1023
39
40#define BUSY_QLEN 8
41
42IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
43
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000044static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
Chris Wilson721d8742016-10-27 11:32:47 +010045{
46 struct local_i915_gem_context_param param;
47
48 memset(&param, 0, sizeof(param));
49 param.context = ctx;
50 param.size = 0;
51 param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
52 param.value = prio;
53
Chris Wilsonaf0e1c52017-02-21 18:25:58 +000054 return __gem_context_set_param(fd, &param);
55}
56
/* Set a context's priority, asserting that the ioctl succeeds. */
static void ctx_set_priority(int fd, uint32_t ctx, int prio)
{
	igt_assert_eq(__ctx_set_priority(fd, ctx, prio), 0);
}
61
/*
 * Skip the enclosing (sub)tests unless the kernel supports setting
 * context priorities (probed on the default context).
 */
static void ctx_has_priority(int fd)
{
	igt_require(__ctx_set_priority(fd, 0, MAX_PRIO) == 0);
}
66
/*
 * Submit a small batch on @ring in context @ctx that writes @value into
 * @target at byte @offset (MI_STORE_DWORD_IMM).
 *
 * If @cork is non-zero, that bo is added to the execbuf so the batch
 * queues behind the cork's unsignalled fence.  @write_domain controls
 * the relocation's declared write domain: passing 0 "lies" that the
 * write is read-only, suppressing the implicit write dependency between
 * consecutive store_dword() batches on the same target.
 */
static void store_dword(int fd, uint32_t ctx, unsigned ring,
			uint32_t target, uint32_t offset, uint32_t value,
			uint32_t cork, unsigned write_domain)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t batch[16];
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	/* Skip obj[0] (the cork slot) entirely when no cork is in use. */
	execbuf.buffers_ptr = to_user_pointer(obj + !cork);
	execbuf.buffer_count = 2 + !!cork;
	execbuf.flags = ring;
	/* Pre-gen6 needs a secure batch for MI_STORE_DWORD_IMM — TODO confirm */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;
	execbuf.rsvd1 = ctx;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork;
	obj[1].handle = target;
	obj[2].handle = gem_create(fd, 4096);

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = obj[1].handle;
	reloc.presumed_offset = 0;
	reloc.offset = sizeof(uint32_t);
	reloc.delta = offset;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = write_domain;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	/* Emit the gen-specific MI_STORE_DWORD_IMM address layout. */
	i = 0;
	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		batch[++i] = offset;	/* 48-bit address, lo then hi */
		batch[++i] = 0;
	} else if (gen >= 4) {
		batch[++i] = 0;		/* MBZ dword precedes the address */
		batch[++i] = offset;
		reloc.offset += sizeof(uint32_t);
	} else {
		batch[i]--;		/* gen2/3: shorter instruction length */
		batch[++i] = offset;
	}
	batch[++i] = value;
	batch[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, obj[2].handle, 0, batch, sizeof(batch));
	gem_execbuf(fd, &execbuf);
	gem_close(fd, obj[2].handle);
}
120
/*
 * Keep the GPU busy with a self-referencing batch (MI_BATCH_BUFFER_START
 * looping back to itself), queued BUSY_QLEN deep on @ring, or on every
 * engine when @ring == -1.  If @target is non-zero, it is attached as a
 * written bo so it inherits the busyness.
 *
 * Returns the WC mapping of the spin batch; the caller must stop the
 * loop with finish_busy().
 */
static uint32_t *make_busy(int fd, uint32_t target, unsigned ring)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry reloc[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	uint32_t *batch;
	int i;

	memset(&execbuf, 0, sizeof(execbuf));
	/* Skip obj[0] (the target slot) when no target was supplied. */
	execbuf.buffers_ptr = to_user_pointer(obj + !target);
	execbuf.buffer_count = 1 + !!target;

	memset(obj, 0, sizeof(obj));
	obj[0].handle = target;
	obj[1].handle = gem_create(fd, 4096);
	batch = gem_mmap__wc(fd, obj[1].handle, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, obj[1].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	obj[1].relocs_ptr = to_user_pointer(reloc);
	obj[1].relocation_count = 1 + !!target;
	memset(reloc, 0, sizeof(reloc));

	reloc[0].target_handle = obj[1].handle; /* recurse */
	reloc[0].presumed_offset = 0;
	reloc[0].offset = sizeof(uint32_t);
	reloc[0].delta = 0;
	reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
	reloc[0].write_domain = 0;

	/* Dummy reloc (offset 1024 is past the batch) just to tie @target in. */
	reloc[1].target_handle = target;
	reloc[1].presumed_offset = 0;
	reloc[1].offset = 1024;
	reloc[1].delta = 0;
	reloc[1].read_domains = I915_GEM_DOMAIN_COMMAND;
	reloc[1].write_domain = 0;

	/* Gen-specific MI_BATCH_BUFFER_START encoding, jumping to itself. */
	i = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			reloc[0].delta = 1;
		}
	}
	i++;

	if (ring != -1) {
		execbuf.flags = ring;
		for (int n = 0; n < BUSY_QLEN; n++)
			gem_execbuf(fd, &execbuf);
	} else {
		/* Spread the spin batch across every engine (0 is symbolic). */
		for_each_engine(fd, ring) {
			if (ring == 0)
				continue;

			execbuf.flags = ring;
			for (int n = 0; n < BUSY_QLEN; n++)
				gem_execbuf(fd, &execbuf);
			igt_assert(execbuf.flags == ring);
		}
	}

	if (target) {
		/* Final submission marks @target as written, for busyness. */
		execbuf.flags = 0;
		reloc[1].write_domain = I915_GEM_DOMAIN_COMMAND;
		gem_execbuf(fd, &execbuf);
	}

	gem_close(fd, obj[1].handle);

	return batch;
}
204
/*
 * Terminate the self-looping batch returned by make_busy() by
 * overwriting its first dword, then drop the mapping.
 */
static void finish_busy(uint32_t *busy)
{
	*busy = MI_BATCH_BUFFER_END;
	munmap(busy, 4096);
}
210
/*
 * A "cork" plugs the submission queue: batches referencing the imported
 * handle wait on an external (vgem) fence that is only signalled by
 * unplug(), so everything queued behind it is held back until then.
 */
struct cork {
	int device;		/* vgem device that owns the fence */
	uint32_t handle;	/* dma-buf handle imported into the i915 fd */
	uint32_t fence;		/* vgem fence, signalled on unplug() */
};
216
/*
 * Create a 1x1 vgem bo with an unsignalled write fence attached and
 * import it into @fd via dma-buf, filling in @c for later unplug().
 */
static void plug(int fd, struct cork *c)
{
	struct vgem_bo bo;
	int dmabuf;

	c->device = drm_open_driver(DRIVER_VGEM);

	bo.width = bo.height = 1;
	bo.bpp = 4;
	vgem_create(c->device, &bo);
	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);

	dmabuf = prime_handle_to_fd(c->device, bo.handle);
	c->handle = prime_fd_to_handle(fd, dmabuf);
	close(dmabuf);
}
233
/*
 * Signal the cork fence, releasing all batches queued behind it, and
 * close the vgem device (which also releases its bo).
 */
static void unplug(struct cork *c)
{
	vgem_fence_signal(c->device, c->fence);
	close(c->device);
}
239
/*
 * Baseline test: two equal-priority writes on the same context/ring must
 * execute in submission order, so the second value (2) wins.
 */
static void fifo(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t *busy;
	uint32_t scratch;
	uint32_t *ptr;

	scratch = gem_create(fd, 4096);

	busy = make_busy(fd, scratch, ring);
	plug(fd, &cork);

	/* Same priority, same timeline, final result will be the second eb */
	store_dword(fd, 0, ring, scratch, 0, 1, cork.handle, 0);
	store_dword(fd, 0, ring, scratch, 0, 2, cork.handle, 0);

	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	igt_assert_eq_u32(ptr[0], 2);
	munmap(ptr, 4096);
}
268
/*
 * Queue one write from a low-priority context and one from a
 * higher-priority context behind a cork.  With distinct priorities the
 * scheduler must run HI first, so LO's value lands last; with EQUAL
 * priorities submission (fifo) order holds and HI's value lands last.
 */
static void reorder(int fd, unsigned ring, unsigned flags)
#define EQUAL 1
{
	struct cork cork;
	uint32_t scratch;
	uint32_t *busy;
	uint32_t *ptr;
	uint32_t ctx[2];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], -MAX_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], flags & EQUAL ? -MAX_PRIO : 0);

	scratch = gem_create(fd, 4096);

	busy = make_busy(fd, scratch, ring);
	plug(fd, &cork);

	/* We expect the high priority context to be executed first, and
	 * so the final result will be value from the low priority context.
	 */
	store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], cork.handle, 0);
	store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], cork.handle, 0);

	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
	gem_set_domain(fd, scratch, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, scratch);

	if (flags & EQUAL) /* equal priority, result will be fifo */
		igt_assert_eq_u32(ptr[0], ctx[HI]);
	else
		igt_assert_eq_u32(ptr[0], ctx[LO]);
	munmap(ptr, 4096);
}
313
/*
 * Priority inheritance: a high-priority request that depends (via @dep)
 * on a low-priority request must promote it, so LO runs before the
 * mid-priority NOISE despite LO's lower priority.  Verified by checking
 * both the dep buffer (HI wrote last) and the result buffer (NOISE
 * wrote last, i.e. after LO).
 */
static void promotion(int fd, unsigned ring)
{
	struct cork cork;
	uint32_t result, dep;
	uint32_t *busy;
	uint32_t *ptr;
	uint32_t ctx[3];

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], -MAX_PRIO);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], 0);

	ctx[NOISE] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[NOISE], -MAX_PRIO/2);

	result = gem_create(fd, 4096);
	dep = gem_create(fd, 4096);

	busy = make_busy(fd, result, ring);
	plug(fd, &cork);

	/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
	 *
	 * fifo would be NOISE, LO, HI.
	 * strict priority would be HI, NOISE, LO
	 */
	store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], cork.handle, 0);
	store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], cork.handle, 0);

	/* link LO <-> HI via a dependency on another buffer */
	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);

	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);

	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[HI]);

	ptr = gem_mmap__gtt(fd, dep, 4096, PROT_READ);
	gem_set_domain(fd, dep, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, dep);

	/* HI depended on LO's write, so HI's value must land last in dep. */
	igt_assert_eq_u32(ptr[0], ctx[HI]);
	munmap(ptr, 4096);

	ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	/* NOISE ran after the promoted LO, leaving its value in result. */
	igt_assert_eq_u32(ptr[0], ctx[NOISE]);
	munmap(ptr, 4096);
}
375
Chris Wilsona3801342017-07-16 16:28:41 +0100376#define NEW_CTX 0x1
377static void preempt(int fd, unsigned ring, unsigned flags)
378{
379 uint32_t result = gem_create(fd, 4096);
380 uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
381 igt_spin_t *spin[16];
382 uint32_t ctx[2];
383
384 ctx[LO] = gem_context_create(fd);
385 ctx_set_priority(fd, ctx[LO], -MAX_PRIO);
386
387 ctx[HI] = gem_context_create(fd);
388 ctx_set_priority(fd, ctx[HI], MAX_PRIO);
389
390 for (int n = 0; n < 16; n++) {
391 if (flags & NEW_CTX) {
392 gem_context_destroy(fd, ctx[LO]);
393 ctx[LO] = gem_context_create(fd);
394 ctx_set_priority(fd, ctx[LO], -MAX_PRIO);
395 }
396 spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
397 igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
398
399 store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
400
401 gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
402 igt_assert_eq_u32(ptr[0], n + 1);
403 igt_assert(gem_bo_busy(fd, spin[0]->handle));
404 }
405
406 for (int n = 0; n < 16; n++)
407 igt_spin_batch_free(fd, spin[n]);
408
409 gem_context_destroy(fd, ctx[LO]);
410 gem_context_destroy(fd, ctx[HI]);
411
412 munmap(ptr, 4096);
413 gem_close(fd, result);
414}
415
static void preempt_other(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [LOW] write
	 *
	 * Then on our target engine do a [HIGH] write which should then
	 * prompt its dependent LOW writes in front of the spinner on
	 * each engine. The purpose of this test is to check that preemption
	 * can cross engines.
	 */

	ctx[LO] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[LO], -MAX_PRIO);

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);

	/* Slot k of result receives value k, written by the k-th request. */
	n = 0;
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[LO], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	/* Wait for the HI write (and, transitively, all LO writes). */
	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	/* All spinners must still be running: the writes preempted them. */
	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* ptr[0] stays 0 (fresh bo); ptr[1..n+1] were written with their index. */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[LO]);
	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
473
static void preempt_self(int fd, unsigned ring)
{
	uint32_t result = gem_create(fd, 4096);
	uint32_t *ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	igt_spin_t *spin[16];
	unsigned int other;
	unsigned int n, i;
	uint32_t ctx[3];

	/* On each engine, insert
	 * [NOISE] spinner,
	 * [self/LOW] write
	 *
	 * Then on our target engine do a [self/HIGH] write which should then
	 * preempt its own lower priority task on any engine.
	 */

	ctx[NOISE] = gem_context_create(fd);

	ctx[HI] = gem_context_create(fd);

	/* Queue the per-engine writes while ctx[HI] is still low priority,
	 * then bump it to MAX_PRIO for the final cross-engine write.
	 */
	n = 0;
	ctx_set_priority(fd, ctx[HI], -MAX_PRIO);
	for_each_engine(fd, other) {
		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
		store_dword(fd, ctx[HI], other,
			    result, (n + 1)*sizeof(uint32_t), n + 1,
			    0, I915_GEM_DOMAIN_RENDER);
		n++;
	}
	ctx_set_priority(fd, ctx[HI], MAX_PRIO);
	store_dword(fd, ctx[HI], ring,
		    result, (n + 1)*sizeof(uint32_t), n + 1,
		    0, I915_GEM_DOMAIN_RENDER);

	/* Wait for the final write (and its predecessors). */
	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);

	/* The NOISE spinners must still be running after the writes landed. */
	for (i = 0; i < n; i++) {
		igt_assert(gem_bo_busy(fd, spin[i]->handle));
		igt_spin_batch_free(fd, spin[i]);
	}

	/* ptr[0] stays 0 (fresh bo); ptr[1..n+1] carry their own index. */
	n++;
	for (i = 0; i <= n; i++)
		igt_assert_eq_u32(ptr[i], i);

	gem_context_destroy(fd, ctx[NOISE]);
	gem_context_destroy(fd, ctx[HI]);

	munmap(ptr, 4096);
	gem_close(fd, result);
}
526
/*
 * Build a deep dependency graph across one context per priority level
 * (0..MAX_PRIO) and XS shared buffers, all plugged behind a cork.
 * After unplugging, each dep slot must end with its own context's value
 * (in-order within a context) and each result slot with the value of
 * the highest-priority context.
 */
static void deep(int fd, unsigned ring)
{
#define XS 8
	struct cork cork;
	uint32_t result, dep[XS];
	uint32_t *busy;
	uint32_t *ptr;
	uint32_t *ctx;

	ctx = malloc(sizeof(*ctx)*(MAX_PRIO + 1));
	for (int n = 0; n <= MAX_PRIO; n++) {
		ctx[n] = gem_context_create(fd);
		ctx_set_priority(fd, ctx[n], n);
	}

	result = gem_create(fd, 4096);
	for (int m = 0; m < XS; m ++)
		dep[m] = gem_create(fd, 4096);

	busy = make_busy(fd, result, ring);
	plug(fd, &cork);

	/* Create a deep dependency chain, with a few branches */
	for (int n = 0; n <= MAX_PRIO; n++)
		for (int m = 0; m < XS; m++)
			store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);

	for (int n = 0; n <= MAX_PRIO; n++) {
		for (int m = 0; m < XS; m++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
			store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}

	igt_assert(gem_bo_busy(fd, result));
	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	for (int n = 0; n <= MAX_PRIO; n++)
		gem_context_destroy(fd, ctx[n]);

	for (int m = 0; m < XS; m++) {
		ptr = gem_mmap__gtt(fd, dep[m], 4096, PROT_READ);
		gem_set_domain(fd, dep[m], /* no write hazard lies! */
			       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
		gem_close(fd, dep[m]);

		/* Each context wrote its own slot; all must have landed. */
		for (int n = 0; n <= MAX_PRIO; n++)
			igt_assert_eq_u32(ptr[n], ctx[n]);
		munmap(ptr, 4096);
	}

	ptr = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	gem_close(fd, result);

	/* The highest-priority context's writes must finish last. */
	for (int m = 0; m < XS; m++)
		igt_assert_eq_u32(ptr[m], ctx[MAX_PRIO]);
	munmap(ptr, 4096);

	free(ctx);
#undef XS
}
592
/* Empty SIGALRM handler: its only job is to interrupt a blocking ioctl
 * with EINTR (see measure_ring_size()).
 */
static void alarm_handler(int sig)
{
}
596
/* Raw execbuffer2 ioctl: returns the ioctl result (-1 with errno set on
 * failure/interruption) instead of asserting like gem_execbuf().
 */
static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}
601
/*
 * Estimate how many requests fit in @ring's ring buffer: queue no-op
 * batches behind a cork until submission blocks; a recurring timer
 * interrupts the blocking ioctl (EINTR via alarm_handler) so the loop
 * can detect that the count stopped growing.
 */
static unsigned int measure_ring_size(int fd, unsigned int ring)
{
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	unsigned int count, last;
	struct itimerval itv;
	struct cork c;

	memset(obj, 0, sizeof(obj));
	obj[1].handle = gem_create(fd, 4096);
	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));

	/* Warm up with an uncorked submission and wait for it. */
	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj + 1);
	execbuf.buffer_count = 1;
	execbuf.flags = ring;
	gem_execbuf(fd, &execbuf);
	gem_sync(fd, obj[1].handle);

	plug(fd, &c);
	obj[0].handle = c.handle;

	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 2;

	/* Fire SIGALRM after 1ms, then every 100us, to break the ioctl. */
	sigaction(SIGALRM, &sa, NULL);
	itv.it_interval.tv_sec = 0;
	itv.it_interval.tv_usec = 100;
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	last = -1;
	count = 0;
	do {
		if (__execbuf(fd, &execbuf) == 0) {
			count++;
			continue;
		}

		/* Two consecutive interrupted attempts with no progress:
		 * the ring is full.
		 */
		if (last == count)
			break;

		last = count;
	} while (1);

	/* Cancel the timer before returning. */
	memset(&itv, 0, sizeof(itv));
	setitimer(ITIMER_REAL, &itv, NULL);

	unplug(&c);
	gem_close(fd, obj[1].handle);

	return count;
}
658
/*
 * Stress in-order execution across many contexts: fill the ring with up
 * to ring_size batches per context (bounded to 5s of queuing), all
 * corked, then release them at once and check each context's slot holds
 * its final (in-order) value.
 */
static void wide(int fd, unsigned ring)
{
#define NCTX 4096
	struct timespec tv = {};
	unsigned int ring_size = measure_ring_size(fd, ring);

	struct cork cork;
	uint32_t result;
	uint32_t *busy;
	uint32_t *ptr;
	uint32_t *ctx;
	unsigned int count;

	ctx = malloc(sizeof(*ctx)*NCTX);
	for (int n = 0; n < NCTX; n++)
		ctx[n] = gem_context_create(fd);

	result = gem_create(fd, 4*NCTX);

	busy = make_busy(fd, result, ring);
	plug(fd, &cork);

	/* Lots of in-order requests, plugged and submitted simultaneously */
	for (count = 0;
	     igt_seconds_elapsed(&tv) < 5 && count < ring_size;
	     count++) {
		for (int n = 0; n < NCTX; n++) {
			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
		}
	}
	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
		 count, NCTX, igt_nsec_elapsed(&tv) * 1e-6);

	igt_assert(gem_bo_busy(fd, result));
	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	for (int n = 0; n < NCTX; n++)
		gem_context_destroy(fd, ctx[n]);

	ptr = gem_mmap__gtt(fd, result, 4*NCTX, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	/* Every context repeatedly wrote its id to its own slot. */
	for (int n = 0; n < NCTX; n++)
		igt_assert_eq_u32(ptr[n], ctx[n]);
	munmap(ptr, 4*NCTX);

	gem_close(fd, result);
	free(ctx);
#undef NCTX
}
711
/*
 * Submit batches from every priority level (-MAX_PRIO..MAX_PRIO, in
 * ascending order, each tagged with an increasing value x) that write
 * to random slots of a shared result buffer, all plugged behind a cork.
 * The scheduler should execute the highest priority (largest x) first,
 * so the value that survives in each slot is the smallest x that ever
 * wrote it — which is recorded in t[] as the batches are built.
 */
static void reorder_wide(int fd, unsigned ring)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_execbuffer2 execbuf;
	struct cork cork;
	uint32_t result, target;
	uint32_t *busy;
	uint32_t *r, *t;

	result = gem_create(fd, 4096);
	target = gem_create(fd, 4096);

	busy = make_busy(fd, result, ring);
	plug(fd, &cork);

	/* t[] holds the expected final value for each of the 1024 slots. */
	t = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
	gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(obj, 0, sizeof(obj));
	obj[0].handle = cork.handle;
	obj[1].handle = result;
	obj[2].relocs_ptr = to_user_pointer(&reloc);
	obj[2].relocation_count = 1;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = result;
	reloc.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
	reloc.write_domain = 0; /* lies */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = to_user_pointer(obj);
	execbuf.buffer_count = 3;
	execbuf.flags = ring;
	/* Pre-gen6 needs a secure batch for MI_STORE_DWORD_IMM — TODO confirm */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	for (int n = -MAX_PRIO, x = 1; n <= MAX_PRIO; n++, x++) {
		uint32_t *batch;

		execbuf.rsvd1 = gem_context_create(fd);
		ctx_set_priority(fd, execbuf.rsvd1, n);

		/* 128 tiny batches of 64 bytes each, in one bo. */
		obj[2].handle = gem_create(fd, 128 * 64);
		batch = gem_mmap__gtt(fd, obj[2].handle, 128 * 64, PROT_WRITE);
		gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

		for (int m = 0; m < 128; m++) {
			uint64_t addr;
			int idx = hars_petruska_f54_1_random_unsafe_max( 1024);
			int i;

			execbuf.batch_start_offset = m * 64;
			reloc.offset = execbuf.batch_start_offset + sizeof(uint32_t);
			reloc.delta = idx * sizeof(uint32_t);
			addr = reloc.presumed_offset + reloc.delta;

			/* Gen-specific MI_STORE_DWORD_IMM of x into slot idx. */
			i = execbuf.batch_start_offset / sizeof(uint32_t);
			batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
			if (gen >= 8) {
				batch[++i] = addr;
				batch[++i] = addr >> 32;
			} else if (gen >= 4) {
				batch[++i] = 0;
				batch[++i] = addr;
				reloc.offset += sizeof(uint32_t);
			} else {
				batch[i]--;
				batch[++i] = addr;
			}
			batch[++i] = x;
			batch[++i] = MI_BATCH_BUFFER_END;

			/* First (lowest-priority) writer of a slot runs last,
			 * so its value is the expected survivor.
			 */
			if (!t[idx])
				t[idx] = x;

			gem_execbuf(fd, &execbuf);
		}

		munmap(batch, 128 * 64);
		gem_close(fd, obj[2].handle);
		gem_context_destroy(fd, execbuf.rsvd1);
	}

	igt_assert(gem_bo_busy(fd, result));
	unplug(&cork); /* only now submit our batches */
	igt_debugfs_dump(fd, "i915_engine_info");
	finish_busy(busy);

	r = gem_mmap__gtt(fd, result, 4096, PROT_READ);
	gem_set_domain(fd, result, /* no write hazard lies! */
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
	for (int n = 0; n < 1024; n++)
		igt_assert_eq_u32(r[n], t[n]);
	munmap(r, 4096);
	munmap(t, 4096);

	gem_close(fd, result);
	gem_close(fd, target);
}
813
814static bool has_scheduler(int fd)
815{
816 drm_i915_getparam_t gp;
817 int has = -1;
818
819 gp.param = LOCAL_PARAM_HAS_SCHEDULER;
820 gp.value = &has;
821 drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
822
823 return has > 0;
824}
825
826igt_main
827{
828 const struct intel_execution_engine *e;
829 int fd = -1;
830
831 igt_skip_on_simulation();
832
833 igt_fixture {
834 fd = drm_open_driver_master(DRIVER_INTEL);
Chris Wilson9518cb52017-02-22 15:24:54 +0000835 igt_require_gem(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100836 gem_require_mmap_wc(fd);
837 igt_fork_hang_detector(fd);
838 }
839
840 igt_subtest_group {
841 for (e = intel_execution_engines; e->name; e++) {
842 /* default exec-id is purely symbolic */
843 if (e->exec_id == 0)
844 continue;
845
846 igt_subtest_f("fifo-%s", e->name) {
847 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100848 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100849 fifo(fd, e->exec_id | e->flags);
850 }
851 }
852 }
853
854 igt_subtest_group {
855 igt_fixture {
856 igt_require(has_scheduler(fd));
Chris Wilsonaf0e1c52017-02-21 18:25:58 +0000857 ctx_has_priority(fd);
Chris Wilson721d8742016-10-27 11:32:47 +0100858 }
859
860 for (e = intel_execution_engines; e->name; e++) {
861 /* default exec-id is purely symbolic */
862 if (e->exec_id == 0)
863 continue;
864
865 igt_subtest_group {
Chris Wilson073cfd72017-03-17 11:52:51 +0000866 igt_fixture {
Chris Wilson721d8742016-10-27 11:32:47 +0100867 gem_require_ring(fd, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100868 igt_require(gem_can_store_dword(fd, e->exec_id) | e->flags);
Chris Wilson073cfd72017-03-17 11:52:51 +0000869 }
Chris Wilson721d8742016-10-27 11:32:47 +0100870
871 igt_subtest_f("in-order-%s", e->name)
872 reorder(fd, e->exec_id | e->flags, EQUAL);
873
874 igt_subtest_f("out-order-%s", e->name)
875 reorder(fd, e->exec_id | e->flags, 0);
876
877 igt_subtest_f("promotion-%s", e->name)
878 promotion(fd, e->exec_id | e->flags);
879
Chris Wilsona3801342017-07-16 16:28:41 +0100880 igt_subtest_f("preempt-%s", e->name)
881 preempt(fd, e->exec_id | e->flags, 0);
882
883 igt_subtest_f("preempt-contexts-%s", e->name)
884 preempt(fd, e->exec_id | e->flags, NEW_CTX);
885
886 igt_subtest_f("preempt-other-%s", e->name)
887 preempt_other(fd, e->exec_id | e->flags);
888
889 igt_subtest_f("preempt-self-%s", e->name)
890 preempt_self(fd, e->exec_id | e->flags);
891
Chris Wilson721d8742016-10-27 11:32:47 +0100892 igt_subtest_f("deep-%s", e->name)
893 deep(fd, e->exec_id | e->flags);
Chris Wilsonf6920752017-04-24 13:20:04 +0100894
895 igt_subtest_f("wide-%s", e->name)
896 wide(fd, e->exec_id | e->flags);
Chris Wilson61f8de72017-07-20 10:08:28 +0100897
898 igt_subtest_f("reorder-wide-%s", e->name)
899 reorder_wide(fd, e->exec_id | e->flags);
Chris Wilson721d8742016-10-27 11:32:47 +0100900 }
901 }
902 }
903
904 igt_fixture {
905 igt_stop_hang_detector();
906 close(fd);
907 }
908}