blob: 95557ef917fe2e9df30d03e5bffc3ba4c187cbae [file] [log] [blame]
Chris Wilsonc3440442016-06-18 00:42:19 +01001/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "igt.h"
25#include "igt_vgem.h"
26
27#include <sys/poll.h>
Chris Wilson43370912016-08-23 13:17:23 +010028#include <signal.h>
29#include <time.h>
Chris Wilsonc3440442016-06-18 00:42:19 +010030
31IGT_TEST_DESCRIPTION("Basic check of polling for prime/vgem fences.");
32
33static void test_read(int vgem, int i915)
34{
35 struct vgem_bo scratch;
36 uint32_t handle;
37 uint32_t *ptr;
38 int dmabuf, i;
39
40 scratch.width = 1024;
41 scratch.height = 1024;
42 scratch.bpp = 32;
43 vgem_create(vgem, &scratch);
44
45 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
46 handle = prime_fd_to_handle(i915, dmabuf);
47 close(dmabuf);
48
49 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
50 for (i = 0; i < 1024; i++)
51 ptr[1024*i] = i;
52 munmap(ptr, scratch.size);
Chris Wilson0e1f5e32016-06-20 13:27:17 +010053 gem_close(vgem, scratch.handle);
Chris Wilsonc3440442016-06-18 00:42:19 +010054
55 for (i = 0; i < 1024; i++) {
56 uint32_t tmp;
57 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
58 igt_assert_eq(tmp, i);
59 }
Chris Wilsonc3440442016-06-18 00:42:19 +010060 gem_close(i915, handle);
Chris Wilsonc3440442016-06-18 00:42:19 +010061}
62
Chris Wilsonec194972016-07-19 10:30:54 +010063static void test_fence_read(int i915, int vgem)
Chris Wilson8cca1102016-07-18 10:25:24 +010064{
65 struct vgem_bo scratch;
66 uint32_t handle;
67 uint32_t *ptr;
68 uint32_t fence;
69 int dmabuf, i;
Chris Wilson2cc78a92016-07-19 10:50:51 +010070 int master[2], slave[2];
71
72 igt_assert(pipe(master) == 0);
73 igt_assert(pipe(slave) == 0);
Chris Wilson8cca1102016-07-18 10:25:24 +010074
75 scratch.width = 1024;
76 scratch.height = 1024;
77 scratch.bpp = 32;
78 vgem_create(vgem, &scratch);
79
80 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
81 handle = prime_fd_to_handle(i915, dmabuf);
82 close(dmabuf);
83
Chris Wilson8cca1102016-07-18 10:25:24 +010084 igt_fork(child, 1) {
85 for (i = 0; i < 1024; i++) {
86 uint32_t tmp;
87 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
Chris Wilson2cc78a92016-07-19 10:50:51 +010088 igt_assert_eq(tmp, 0);
89 }
90 write(master[1], &child, sizeof(child));
91 read(slave[0], &child, sizeof(child));
92 for (i = 0; i < 1024; i++) {
93 uint32_t tmp;
94 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
Chris Wilson8cca1102016-07-18 10:25:24 +010095 igt_assert_eq(tmp, i);
96 }
97 gem_close(i915, handle);
98 }
99
Chris Wilson2cc78a92016-07-19 10:50:51 +0100100 read(master[0], &i, sizeof(i));
101 fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
102 write(slave[1], &i, sizeof(i));
Chris Wilsonec194972016-07-19 10:30:54 +0100103
Chris Wilson8cca1102016-07-18 10:25:24 +0100104 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
105 for (i = 0; i < 1024; i++)
106 ptr[1024*i] = i;
107 munmap(ptr, scratch.size);
108 vgem_fence_signal(vgem, fence);
109 gem_close(vgem, scratch.handle);
110
111 igt_waitchildren();
Chris Wilson2cc78a92016-07-19 10:50:51 +0100112 close(master[0]);
113 close(master[1]);
114 close(slave[0]);
115 close(slave[1]);
116}
117
/*
 * As test_fence_read, but the child observes the buffer through a GTT mmap:
 * gem_set_domain(GTT) must wait on the vgem fence before access proceeds.
 */
static void test_fence_mmap(int i915, int vgem)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	uint32_t fence;
	int dmabuf, i;
	int master[2], slave[2];

	/* Two pipes form a bidirectional handshake with the forked child. */
	igt_assert(pipe(master) == 0);
	igt_assert(pipe(slave) == 0);

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	igt_fork(child, 1) {
		ptr = gem_mmap__gtt(i915, handle, 4096*1024, PROT_READ);

		/* Pre-fence: moving to the GTT domain sees only zeroes. */
		gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
		for (i = 0; i < 1024; i++)
			igt_assert_eq(ptr[1024*i], 0);

		/* Tell the parent to attach the fence, then wait for the go. */
		write(master[1], &child, sizeof(child));
		read(slave[0], &child, sizeof(child));

		/* This set-domain must block on the fence; data then visible. */
		gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
		for (i = 0; i < 1024; i++)
			igt_assert_eq(ptr[1024*i], i);

		gem_close(i915, handle);
	}

	/* Attach the write fence only after the child's first pass. */
	read(master[0], &i, sizeof(i));
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	write(slave[1], &i, sizeof(i));

	/* Fill the buffer behind the fence, then signal completion. */
	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	vgem_fence_signal(vgem, fence);
	gem_close(vgem, scratch.handle);

	igt_waitchildren();
	close(master[0]);
	close(master[1]);
	close(slave[0]);
	close(slave[1]);
}
173
Chris Wilsonc3440442016-06-18 00:42:19 +0100174static void test_write(int vgem, int i915)
175{
176 struct vgem_bo scratch;
177 uint32_t handle;
178 uint32_t *ptr;
179 int dmabuf, i;
180
181 scratch.width = 1024;
182 scratch.height = 1024;
183 scratch.bpp = 32;
184 vgem_create(vgem, &scratch);
185
186 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
187 handle = prime_fd_to_handle(i915, dmabuf);
188 close(dmabuf);
189
Chris Wilson0e1f5e32016-06-20 13:27:17 +0100190 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
191 gem_close(vgem, scratch.handle);
192
Chris Wilsonc3440442016-06-18 00:42:19 +0100193 for (i = 0; i < 1024; i++)
194 gem_write(i915, handle, 4096*i, &i, sizeof(i));
Chris Wilson0e1f5e32016-06-20 13:27:17 +0100195 gem_close(i915, handle);
Chris Wilsonc3440442016-06-18 00:42:19 +0100196
Chris Wilsonc3440442016-06-18 00:42:19 +0100197 for (i = 0; i < 1024; i++)
198 igt_assert_eq(ptr[1024*i], i);
199 munmap(ptr, scratch.size);
Chris Wilsonc3440442016-06-18 00:42:19 +0100200}
201
/*
 * Exercise coherency between an i915 GTT mmap of the imported dma-buf and
 * the native vgem mmap: each side writes, the other must observe the value.
 */
static void test_gtt(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr, *gtt;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	/* Write one dword per page via the i915 GTT mapping... */
	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);

	/* ...check and rewrite it through the vgem mapping... */
	ptr = vgem_mmap(vgem, &scratch, PROT_READ | PROT_WRITE);
	for (i = 0; i < 1024; i++) {
		igt_assert_eq(ptr[1024*i], i);
		ptr[1024*i] = ~i;
	}
	munmap(ptr, scratch.size);

	/* ...and confirm the vgem writes are visible back through the GTT. */
	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq(ptr[1024*i], ~i);
	munmap(ptr, scratch.size);

	/* Finally, ping-pong with both mappings held simultaneously. */
	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	gtt = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
#if defined(__x86_64__)
	for (i = 0; i < 1024; i++) {
		gtt[1024*i] = i;
		/* sfence flushes the WC write before the CPU read-back. */
		__builtin_ia32_sfence();
		igt_assert_eq(ptr[1024*i], i);
		ptr[1024*i] = ~i;
		__builtin_ia32_sfence();
		igt_assert_eq(gtt[1024*i], ~i);
	}
#endif
	munmap(gtt, scratch.size);
	munmap(ptr, scratch.size);

	gem_close(i915, handle);
	gem_close(vgem, scratch.handle);
}
253
/*
 * Report whether the dma-buf is still busy: a zero-timeout poll times out
 * (returns 0) while the requested access (POLLOUT for exclusive/write,
 * POLLIN for shared/read) is blocked by unsignaled fences.
 */
static bool prime_busy(int fd, bool excl)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = excl ? POLLOUT : POLLIN;
	pfd.revents = 0;

	return poll(&pfd, 1, 0) == 0;
}
259
/*
 * Submit GPU work against the imported dma-buf: a self-recursing batch that
 * stores the values 0..1023 into successive dwords of the scratch object.
 * The batch ends with MI_BATCH_BUFFER_START pointing back at itself, so it
 * spins on the GPU until we overwrite that instruction (via the WC mmap)
 * with MI_BATCH_BUFFER_END.  While spinning, the dma-buf must report busy
 * for both read and write polls.
 */
static void work(int i915, int dmabuf, unsigned ring, uint32_t flags)
{
	const int SCRATCH = 0;
	const int BATCH = 1;
	const int gen = intel_gen(intel_get_drm_devid(i915));
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry store[1024+1];
	struct drm_i915_gem_execbuffer2 execbuf;
	/* Worst case 16 bytes per store + terminator, page aligned. */
	unsigned size = ALIGN(ARRAY_SIZE(store)*16 + 4, 4096);
	bool read_busy, write_busy;
	uint32_t *batch, *bbe;
	int i, count;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | flags;
	/* Pre-gen6 needs a secure batch for MI_STORE_DWORD_IMM. */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[SCRATCH].handle = prime_fd_to_handle(i915, dmabuf);

	obj[BATCH].handle = gem_create(i915, size);
	obj[BATCH].relocs_ptr = (uintptr_t)store;
	obj[BATCH].relocation_count = ARRAY_SIZE(store);
	memset(store, 0, sizeof(store));

	batch = gem_mmap__wc(i915, obj[BATCH].handle, 0, size, PROT_WRITE);
	gem_set_domain(i915, obj[BATCH].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	/* Emit 1024 MI_STORE_DWORD_IMM, one per dword of the scratch bo.
	 * The instruction layout (address width, register vs physical)
	 * differs per generation, hence the gen branches below.
	 */
	i = 0;
	for (count = 0; count < 1024; count++) {
		store[count].target_handle = obj[SCRATCH].handle;
		store[count].presumed_offset = -1;
		store[count].offset = sizeof(uint32_t) * (i + 1);
		store[count].delta = sizeof(uint32_t) * count;
		store[count].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		store[count].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			batch[++i] = 0;
			batch[++i] = 0;
		} else if (gen >= 4) {
			batch[++i] = 0;
			batch[++i] = 0;
			store[count].offset += sizeof(uint32_t);
		} else {
			batch[i]--;
			batch[++i] = 0;
		}
		batch[++i] = count;
		i++;
	}

	/* Terminate with a MI_BATCH_BUFFER_START back into this batch, so
	 * the GPU loops until *bbe is replaced with MI_BATCH_BUFFER_END.
	 */
	bbe = &batch[i];
	store[count].target_handle = obj[BATCH].handle; /* recurse */
	store[count].presumed_offset = 0;
	store[count].offset = sizeof(uint32_t) * (i + 1);
	store[count].delta = 0;
	store[count].read_domains = I915_GEM_DOMAIN_COMMAND;
	store[count].write_domain = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			store[count].delta = 1;
		}
	}
	i++;
	igt_assert(i < size/sizeof(*batch));
	igt_require(__gem_execbuf(i915, &execbuf) == 0);
	gem_close(i915, obj[BATCH].handle);
	gem_close(i915, obj[SCRATCH].handle);

	/* Sample busyness while the batch is still spinning. */
	write_busy = prime_busy(dmabuf, false);
	read_busy = prime_busy(dmabuf, true);

	/* Release the GPU: overwrite the recursion with a batch end. */
	*bbe = MI_BATCH_BUFFER_END;
	__sync_synchronize();
	munmap(batch, size);

	igt_assert(read_busy && write_busy);
}
354
355static void test_busy(int i915, int vgem, unsigned ring, uint32_t flags)
356{
357 struct vgem_bo scratch;
358 struct timespec tv;
359 uint32_t *ptr;
360 int dmabuf;
361 int i;
362
363 scratch.width = 1024;
364 scratch.height = 1;
365 scratch.bpp = 32;
366 vgem_create(vgem, &scratch);
367 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
368
369 work(i915, dmabuf, ring, flags);
370
371 /* Calling busy in a loop should be enough to flush the rendering */
372 memset(&tv, 0, sizeof(tv));
373 while (prime_busy(dmabuf, false))
374 igt_assert(igt_seconds_elapsed(&tv) < 10);
375
376 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
377 for (i = 0; i < 1024; i++)
378 igt_assert_eq_u32(ptr[i], i);
379 munmap(ptr, 4096);
380
381 gem_close(vgem, scratch.handle);
382 close(dmabuf);
383}
384
385static void test_wait(int i915, int vgem, unsigned ring, uint32_t flags)
386{
387 struct vgem_bo scratch;
388 struct pollfd pfd;
389 uint32_t *ptr;
390 int i;
391
392 scratch.width = 1024;
393 scratch.height = 1;
394 scratch.bpp = 32;
395 vgem_create(vgem, &scratch);
396 pfd.fd = prime_handle_to_fd(vgem, scratch.handle);
397
398 work(i915, pfd.fd, ring, flags);
399
400 pfd.events = POLLIN;
401 igt_assert_eq(poll(&pfd, 1, 10000), 1);
402
403 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
404 for (i = 0; i < 1024; i++)
405 igt_assert_eq_u32(ptr[i], i);
406 munmap(ptr, 4096);
407
408 gem_close(vgem, scratch.handle);
409 close(pfd.fd);
410}
411
412static void test_sync(int i915, int vgem, unsigned ring, uint32_t flags)
413{
414 struct vgem_bo scratch;
415 uint32_t *ptr;
416 int dmabuf;
417 int i;
418
419 scratch.width = 1024;
420 scratch.height = 1;
421 scratch.bpp = 32;
422 vgem_create(vgem, &scratch);
423 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
424
Chris Wilson93256e32016-06-22 07:21:09 +0100425 ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
426 igt_assert(ptr != MAP_FAILED);
427 gem_close(vgem, scratch.handle);
428
Chris Wilsonc3440442016-06-18 00:42:19 +0100429 work(i915, dmabuf, ring, flags);
430
431 prime_sync_start(dmabuf, false);
Chris Wilsonc3440442016-06-18 00:42:19 +0100432 for (i = 0; i < 1024; i++)
433 igt_assert_eq_u32(ptr[i], i);
Chris Wilsonc3440442016-06-18 00:42:19 +0100434
435 prime_sync_end(dmabuf, false);
Chris Wilsonc3440442016-06-18 00:42:19 +0100436 close(dmabuf);
Chris Wilson93256e32016-06-22 07:21:09 +0100437
438 munmap(ptr, scratch.size);
439}
440
/*
 * Attach an unsignaled vgem write fence before submitting GPU work, and
 * check that the work does not execute until the fence is signaled: the
 * buffer stays zeroed while the fence is pending, and contains the stores
 * after signal + waitchildren.
 */
static void test_fence_wait(int i915, int vgem, unsigned ring, unsigned flags)
{
	struct vgem_bo scratch;
	uint32_t fence;
	uint32_t *ptr;
	int dmabuf;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	/* The write fence alone must make the dma-buf report busy. */
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	igt_assert(prime_busy(dmabuf, false));
	gem_close(vgem, scratch.handle);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);

	/* Submit from a child: execbuf may block behind the fence. */
	igt_fork(child, 1)
		work(i915, dmabuf, ring, flags);

	sleep(1);

	/* Check for invalidly completing the task early */
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], 0);

	igt_assert(prime_busy(dmabuf, false));
	vgem_fence_signal(vgem, fence);
	igt_waitchildren();

	/* But after signaling and waiting, it should be done */
	prime_sync_start(dmabuf, false);
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	prime_sync_end(dmabuf, false);

	close(dmabuf);

	munmap(ptr, scratch.size);
}
484
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100485static void test_fence_hang(int i915, int vgem, unsigned flags)
Chris Wilson93256e32016-06-22 07:21:09 +0100486{
487 struct vgem_bo scratch;
488 uint32_t *ptr;
489 int dmabuf;
490 int i;
491
492 scratch.width = 1024;
493 scratch.height = 1;
494 scratch.bpp = 32;
495 vgem_create(vgem, &scratch);
496 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100497 vgem_fence_attach(vgem, &scratch, flags | WIP_VGEM_FENCE_NOTIMEOUT);
Chris Wilson93256e32016-06-22 07:21:09 +0100498
499 ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
500 igt_assert(ptr != MAP_FAILED);
501 gem_close(vgem, scratch.handle);
502
503 work(i915, dmabuf, I915_EXEC_DEFAULT, 0);
504
505 /* The work should have been cancelled */
506
507 prime_sync_start(dmabuf, false);
508 for (i = 0; i < 1024; i++)
509 igt_assert_eq_u32(ptr[i], 0);
510 prime_sync_end(dmabuf, false);
511 close(dmabuf);
512
513 munmap(ptr, scratch.size);
Chris Wilsonc3440442016-06-18 00:42:19 +0100514}
515
516static bool has_prime_export(int fd)
517{
518 uint64_t value;
519
520 if (drmGetCap(fd, DRM_CAP_PRIME, &value))
521 return false;
522
523 return value & DRM_PRIME_CAP_EXPORT;
524}
525
526static bool has_prime_import(int fd)
527{
528 uint64_t value;
529
530 if (drmGetCap(fd, DRM_CAP_PRIME, &value))
531 return false;
532
533 return value & DRM_PRIME_CAP_IMPORT;
534}
535
Chris Wilson93256e32016-06-22 07:21:09 +0100536static uint32_t set_fb_on_crtc(int fd, int pipe, struct vgem_bo *bo, uint32_t fb_id)
537{
538 drmModeRes *resources = drmModeGetResources(fd);
539 struct drm_mode_modeinfo *modes = malloc(4096*sizeof(*modes));
540 uint32_t encoders[32];
541
542 for (int o = 0; o < resources->count_connectors; o++) {
543 struct drm_mode_get_connector conn;
544 struct drm_mode_crtc set;
545 int e, m;
546
547 memset(&conn, 0, sizeof(conn));
548 conn.connector_id = resources->connectors[o];
Chris Wilson93256e32016-06-22 07:21:09 +0100549 drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
Chris Wilson2e190c32016-08-23 13:08:39 +0100550 if (!conn.count_modes)
551 continue;
552
553 igt_assert(conn.count_modes <= 4096);
554 igt_assert(conn.count_encoders <= 32);
555
556 conn.modes_ptr = (uintptr_t)modes;
557 conn.encoders_ptr = (uintptr_t)encoders;
558 conn.count_props = 0;
559 do_or_die(drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn));
Chris Wilson93256e32016-06-22 07:21:09 +0100560
561 for (e = 0; e < conn.count_encoders; e++) {
562 struct drm_mode_get_encoder enc;
563
564 memset(&enc, 0, sizeof(enc));
565 enc.encoder_id = encoders[e];
566 drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc);
567 if (enc.possible_crtcs & (1 << pipe))
568 break;
569 }
570 if (e == conn.count_encoders)
571 continue;
572
573 for (m = 0; m < conn.count_modes; m++) {
Chris Wilson2e190c32016-08-23 13:08:39 +0100574 if (modes[m].hdisplay <= bo->width &&
575 modes[m].vdisplay <= bo->height)
Chris Wilson93256e32016-06-22 07:21:09 +0100576 break;
577 }
578 if (m == conn.count_modes)
579 continue;
580
581 memset(&set, 0, sizeof(set));
582 set.crtc_id = resources->crtcs[pipe];
583 set.fb_id = fb_id;
584 set.set_connectors_ptr = (uintptr_t)&conn.connector_id;
585 set.count_connectors = 1;
586 set.mode = modes[m];
587 set.mode_valid = 1;
588 if (drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &set) == 0) {
589 drmModeFreeResources(resources);
590 return set.crtc_id;
591 }
592 }
593
594 drmModeFreeResources(resources);
595 return 0;
596}
597
598static inline uint32_t pipe_select(int pipe)
599{
600 if (pipe > 1)
601 return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
602 else if (pipe > 0)
603 return DRM_VBLANK_SECONDARY;
604 else
605 return 0;
606}
607
608static unsigned get_vblank(int fd, int pipe, unsigned flags)
609{
610 union drm_wait_vblank vbl;
611
612 memset(&vbl, 0, sizeof(vbl));
613 vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe) | flags;
614 if (drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
615 return 0;
616
617 return vbl.reply.sequence;
618}
619
Chris Wilson43370912016-08-23 13:17:23 +0100620static void sighandler(int sig)
621{
622}
623
Chris Wilsona6886342016-08-23 14:03:01 +0100624static void flip_to_vgem(int i915, int vgem,
625 struct vgem_bo *bo,
626 uint32_t fb_id,
627 uint32_t crtc_id,
628 unsigned hang,
629 const char *name)
Chris Wilson93256e32016-06-22 07:21:09 +0100630{
Chris Wilson652ad3e2016-09-01 13:57:31 +0100631 const struct timespec tv = { 1, 0 };
Chris Wilson8ad24ee2016-09-21 12:48:14 +0100632 struct pollfd pfd = { i915, POLLIN };
Chris Wilsona6886342016-08-23 14:03:01 +0100633 struct drm_event_vblank vbl;
634 uint32_t fence;
Chris Wilson93256e32016-06-22 07:21:09 +0100635
Chris Wilsondb97b692016-08-30 14:35:36 +0100636 fence = vgem_fence_attach(vgem, bo, VGEM_FENCE_WRITE | hang);
637
Chris Wilson43370912016-08-23 13:17:23 +0100638 igt_fork(child, 1) {
Chris Wilson43370912016-08-23 13:17:23 +0100639 do_or_die(drmModePageFlip(i915, crtc_id, fb_id,
640 DRM_MODE_PAGE_FLIP_EVENT, &fb_id));
Chris Wilsondb97b692016-08-30 14:35:36 +0100641 kill(getppid(), SIGHUP);
Chris Wilson93256e32016-06-22 07:21:09 +0100642
Chris Wilson43370912016-08-23 13:17:23 +0100643 /* Check we don't flip before the fence is ready */
Chris Wilson43370912016-08-23 13:17:23 +0100644 for (int n = 0; n < 5; n++) {
Chris Wilsona6886342016-08-23 14:03:01 +0100645 igt_assert_f(poll(&pfd, 1, 0) == 0,
646 "flip to %s completed whilst busy\n",
647 name);
Chris Wilson43370912016-08-23 13:17:23 +0100648 get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
649 }
Chris Wilson93256e32016-06-22 07:21:09 +0100650 }
651
Chris Wilson652ad3e2016-09-01 13:57:31 +0100652 igt_assert_f(nanosleep(&tv, NULL) == -1,
653 "flip to busy %s blocked\n", name);
654
Chris Wilson93256e32016-06-22 07:21:09 +0100655 /* And then the flip is completed as soon as it is ready */
656 if (!hang) {
Chris Wilson43370912016-08-23 13:17:23 +0100657 union drm_wait_vblank wait;
Chris Wilson43370912016-08-23 13:17:23 +0100658
659 memset(&wait, 0, sizeof(wait));
660 wait.request.type = DRM_VBLANK_RELATIVE | pipe_select(0);
661 wait.request.sequence = 10;
662 do_or_die(drmIoctl(i915, DRM_IOCTL_WAIT_VBLANK, &wait));
663
Chris Wilson93256e32016-06-22 07:21:09 +0100664 vgem_fence_signal(vgem, fence);
665 get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
666 igt_assert_eq(poll(&pfd, 1, 0), 1);
667 }
668 /* Even if hung, the flip must complete *eventually* */
669 igt_set_timeout(20, "Ignored hang"); /* XXX lower fail threshold? */
670 igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
671 igt_reset_timeout();
672
Chris Wilson43370912016-08-23 13:17:23 +0100673 igt_waitchildren();
Chris Wilsona6886342016-08-23 14:03:01 +0100674}
Chris Wilson43370912016-08-23 13:17:23 +0100675
Chris Wilsona6886342016-08-23 14:03:01 +0100676static void test_flip(int i915, int vgem, unsigned hang)
677{
678 struct vgem_bo bo[2];
679 uint32_t fb_id[2], handle[2], crtc_id;
Chris Wilsona6886342016-08-23 14:03:01 +0100680
Chris Wilsondb97b692016-08-30 14:35:36 +0100681 signal(SIGHUP, sighandler);
Chris Wilsona6886342016-08-23 14:03:01 +0100682
683 for (int i = 0; i < 2; i++) {
Chris Wilsondb97b692016-08-30 14:35:36 +0100684 int fd;
685
Chris Wilsona6886342016-08-23 14:03:01 +0100686 bo[i].width = 1024;
687 bo[i].height = 768;
688 bo[i].bpp = 32;
689 vgem_create(vgem, &bo[i]);
690
691 fd = prime_handle_to_fd(vgem, bo[i].handle);
692 handle[i] = prime_fd_to_handle(i915, fd);
693 igt_assert(handle[i]);
694 close(fd);
Chris Wilsondb97b692016-08-30 14:35:36 +0100695
Chris Wilsona6886342016-08-23 14:03:01 +0100696 do_or_die(__kms_addfb(i915, handle[i],
697 bo[i].width, bo[i].height, bo[i].pitch,
698 DRM_FORMAT_XRGB8888, I915_TILING_NONE,
699 LOCAL_DRM_MODE_FB_MODIFIERS, &fb_id[i]));
700 igt_assert(fb_id[i]);
701 }
702
703 igt_require((crtc_id = set_fb_on_crtc(i915, 0, &bo[0], fb_id[0])));
704
705 /* Bind both fb for use by flipping */
706 for (int i = 1; i >= 0; i--) {
707 struct drm_event_vblank vbl;
708
709 do_or_die(drmModePageFlip(i915, crtc_id, fb_id[i],
710 DRM_MODE_PAGE_FLIP_EVENT, &fb_id[i]));
711 igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
712 }
713
714 /* Schedule a flip to wait upon the frontbuffer vgem being written */
715 flip_to_vgem(i915, vgem, &bo[0], fb_id[0], crtc_id, hang, "front");
716
717 /* Schedule a flip to wait upon the backbuffer vgem being written */
718 flip_to_vgem(i915, vgem, &bo[1], fb_id[1], crtc_id, hang, "back");
719
720 for (int i = 0; i < 2; i++) {
721 do_or_die(drmModeRmFB(i915, fb_id[i]));
722 gem_close(i915, handle[i]);
723 gem_close(vgem, bo[i].handle);
724 }
Chris Wilson43370912016-08-23 13:17:23 +0100725
Chris Wilsondb97b692016-08-30 14:35:36 +0100726 signal(SIGHUP, SIG_DFL);
Chris Wilson93256e32016-06-22 07:21:09 +0100727}
728
Chris Wilsonc3440442016-06-18 00:42:19 +0100729igt_main
730{
731 const struct intel_execution_engine *e;
732 int i915 = -1;
733 int vgem = -1;
Chris Wilsonc3440442016-06-18 00:42:19 +0100734
735 igt_skip_on_simulation();
736
737 igt_fixture {
738 vgem = drm_open_driver(DRIVER_VGEM);
739 igt_require(has_prime_export(vgem));
740
741 i915 = drm_open_driver_master(DRIVER_INTEL);
Chris Wilson9518cb52017-02-22 15:24:54 +0000742 igt_require_gem(i915);
Chris Wilsonc3440442016-06-18 00:42:19 +0100743 igt_require(has_prime_import(i915));
744 gem_require_mmap_wc(i915);
Chris Wilsonc3440442016-06-18 00:42:19 +0100745 }
746
747 igt_subtest("basic-read")
748 test_read(vgem, i915);
749
750 igt_subtest("basic-write")
751 test_write(vgem, i915);
752
753 igt_subtest("basic-gtt")
754 test_gtt(vgem, i915);
755
756 for (e = intel_execution_engines; e->name; e++) {
757 igt_subtest_f("%ssync-%s",
Chris Wilsonec91c562017-07-06 14:01:56 +0100758 e->exec_id == 0 ? "basic-" : "",
Chris Wilsonc3440442016-06-18 00:42:19 +0100759 e->name) {
760 gem_require_ring(i915, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100761 igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
762
Chris Wilsonc3440442016-06-18 00:42:19 +0100763 gem_quiescent_gpu(i915);
764 test_sync(i915, vgem, e->exec_id, e->flags);
765 }
766 }
767
768 for (e = intel_execution_engines; e->name; e++) {
769 igt_subtest_f("%sbusy-%s",
Chris Wilsonec91c562017-07-06 14:01:56 +0100770 e->exec_id == 0 ? "basic-" : "",
Chris Wilsonc3440442016-06-18 00:42:19 +0100771 e->name) {
772 gem_require_ring(i915, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100773 igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
774
Chris Wilsonc3440442016-06-18 00:42:19 +0100775 gem_quiescent_gpu(i915);
776 test_busy(i915, vgem, e->exec_id, e->flags);
777 }
778 }
779
780 for (e = intel_execution_engines; e->name; e++) {
781 igt_subtest_f("%swait-%s",
Chris Wilsonec91c562017-07-06 14:01:56 +0100782 e->exec_id == 0 ? "basic-" : "",
Chris Wilsonc3440442016-06-18 00:42:19 +0100783 e->name) {
784 gem_require_ring(i915, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100785 igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
786
Chris Wilsonc3440442016-06-18 00:42:19 +0100787 gem_quiescent_gpu(i915);
788 test_wait(i915, vgem, e->exec_id, e->flags);
789 }
790 }
791
Chris Wilson93256e32016-06-22 07:21:09 +0100792 /* Fence testing */
793 igt_subtest_group {
794 igt_fixture {
795 igt_require(vgem_has_fences(vgem));
796 }
797
Chris Wilson8cca1102016-07-18 10:25:24 +0100798 igt_subtest("basic-fence-read")
799 test_fence_read(i915, vgem);
Chris Wilson2cc78a92016-07-19 10:50:51 +0100800 igt_subtest("basic-fence-mmap")
801 test_fence_mmap(i915, vgem);
Chris Wilson8cca1102016-07-18 10:25:24 +0100802
Chris Wilson93256e32016-06-22 07:21:09 +0100803 for (e = intel_execution_engines; e->name; e++) {
804 igt_subtest_f("%sfence-wait-%s",
Chris Wilsonec91c562017-07-06 14:01:56 +0100805 e->exec_id == 0 ? "basic-" : "",
806 e->name) {
Chris Wilson93256e32016-06-22 07:21:09 +0100807 gem_require_ring(i915, e->exec_id | e->flags);
Chris Wilsonbc787762017-05-18 12:11:59 +0100808 igt_require(gem_can_store_dword(i915, e->exec_id) | e->flags);
809
Chris Wilson93256e32016-06-22 07:21:09 +0100810 gem_quiescent_gpu(i915);
811 test_fence_wait(i915, vgem, e->exec_id, e->flags);
812 }
813 }
814
Chris Wilsoncb42b0d2016-08-22 15:59:05 +0100815 igt_subtest("basic-fence-flip")
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100816 test_flip(i915, vgem, 0);
817
818 igt_subtest_group {
819 igt_fixture {
820 igt_require(vgem_fence_has_flag(vgem, WIP_VGEM_FENCE_NOTIMEOUT));
821 }
822
823 igt_subtest("fence-read-hang")
824 test_fence_hang(i915, vgem, 0);
825 igt_subtest("fence-write-hang")
826 test_fence_hang(i915, vgem, VGEM_FENCE_WRITE);
827
828 igt_subtest("fence-flip-hang")
829 test_flip(i915, vgem, WIP_VGEM_FENCE_NOTIMEOUT);
830 }
Chris Wilson93256e32016-06-22 07:21:09 +0100831 }
832
Chris Wilsonc3440442016-06-18 00:42:19 +0100833 igt_fixture {
834 close(i915);
835 close(vgem);
836 }
837}