/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"
#include "igt_vgem.h"

#include <sys/poll.h>

IGT_TEST_DESCRIPTION("Basic check of polling for prime/vgem fences.");

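/*
 * test_read: export a vgem bo to i915, scribble a pattern (one dword per
 * page) through the vgem mmap, then read it back with i915 pread on the
 * imported handle to confirm both drivers see the same pages.
 */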
static void test_read(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	gem_close(vgem, scratch.handle);

	for (i = 0; i < 1024; i++) {
		uint32_t tmp;
		gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
		igt_assert_eq(tmp, i);
	}
	gem_close(i915, handle);
}

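/*
 * test_fence_read: as test_read, but with a vgem exclusive (write) fence
 * attached before the child starts reading. The child's pread must block
 * until the parent has filled the buffer and signalled the fence, so the
 * child can only ever observe the completed pattern.
 */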
static void test_fence_read(int i915, int vgem)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	uint32_t fence;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);

	igt_fork(child, 1) {
		for (i = 0; i < 1024; i++) {
			uint32_t tmp;
			gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
			igt_assert_eq(tmp, i);
		}
		gem_close(i915, handle);
	}

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);
	vgem_fence_signal(vgem, fence);
	gem_close(vgem, scratch.handle);

	igt_waitchildren();
}

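/*
 * test_write: the inverse of test_read. Map the bo through vgem first
 * (the mapping stays alive across gem_close), write the pattern via
 * i915 pwrite on the imported handle, and verify it lands in the vgem
 * mapping.
 */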
static void test_write(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	gem_close(vgem, scratch.handle);

	for (i = 0; i < 1024; i++)
		gem_write(i915, handle, 4096*i, &i, sizeof(i));
	gem_close(i915, handle);

	for (i = 0; i < 1024; i++)
		igt_assert_eq(ptr[1024*i], i);
	munmap(ptr, scratch.size);
}

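/*
 * test_gtt: exercise coherency between an i915 GTT mapping and a vgem
 * CPU mapping of the same pages, first with bulk handovers between the
 * two and then, on x86-64 only, with interleaved accesses separated by
 * sfence to flush the write-combining buffers.
 */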
static void test_gtt(int vgem, int i915)
{
	struct vgem_bo scratch;
	uint32_t handle;
	uint32_t *ptr, *gtt;
	int dmabuf, i;

	scratch.width = 1024;
	scratch.height = 1024;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	handle = prime_fd_to_handle(i915, dmabuf);
	close(dmabuf);

	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
	for (i = 0; i < 1024; i++)
		ptr[1024*i] = i;
	munmap(ptr, scratch.size);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ | PROT_WRITE);
	for (i = 0; i < 1024; i++) {
		igt_assert_eq(ptr[1024*i], i);
		ptr[1024*i] = ~i;
	}
	munmap(ptr, scratch.size);

	ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq(ptr[1024*i], ~i);
	munmap(ptr, scratch.size);

	ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
	gtt = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
#if defined(__x86_64__)
	for (i = 0; i < 1024; i++) {
		gtt[1024*i] = i;
		__builtin_ia32_sfence();
		igt_assert_eq(ptr[1024*i], i);
		ptr[1024*i] = ~i;
		__builtin_ia32_sfence();
		igt_assert_eq(gtt[1024*i], ~i);
	}
#endif
	munmap(gtt, scratch.size);
	munmap(ptr, scratch.size);

	gem_close(i915, handle);
	gem_close(vgem, scratch.handle);
}

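/*
 * prime_busy: a non-blocking poll on the dma-buf fd. POLLIN waits only
 * on the exclusive (write) fence, POLLOUT on all attached fences; a
 * zero return from poll() means nothing was ready within the zero
 * timeout, i.e. the buffer is still busy.
 */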
static bool prime_busy(int fd, bool excl)
{
	struct pollfd pfd = { .fd = fd, .events = excl ? POLLOUT : POLLIN };
	return poll(&pfd, 1, 0) == 0;
}

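/*
 * work: import the dma-buf into i915 and submit a self-referencing
 * batch that writes 0..1023 into successive dwords of the scratch bo
 * with MI_STORE_DWORD_IMM, then jumps back to its own start with
 * MI_BATCH_BUFFER_START. The GPU spins until the CPU overwrites the
 * jump with MI_BATCH_BUFFER_END, guaranteeing the dma-buf reports busy
 * while we sample it with prime_busy().
 */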
static void work(int i915, int dmabuf, unsigned ring, uint32_t flags)
{
	const int SCRATCH = 0;
	const int BATCH = 1;
	const int gen = intel_gen(intel_get_drm_devid(i915));
	struct drm_i915_gem_exec_object2 obj[2];
	struct drm_i915_gem_relocation_entry store[1024+1];
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned size = ALIGN(ARRAY_SIZE(store)*16 + 4, 4096);
	bool read_busy, write_busy;
	uint32_t *batch, *bbe;
	int i, count;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | flags;
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[SCRATCH].handle = prime_fd_to_handle(i915, dmabuf);

	obj[BATCH].handle = gem_create(i915, size);
	obj[BATCH].relocs_ptr = (uintptr_t)store;
	obj[BATCH].relocation_count = ARRAY_SIZE(store);
	memset(store, 0, sizeof(store));

	batch = gem_mmap__wc(i915, obj[BATCH].handle, 0, size, PROT_WRITE);
	gem_set_domain(i915, obj[BATCH].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	i = 0;
	for (count = 0; count < 1024; count++) {
		store[count].target_handle = obj[SCRATCH].handle;
		store[count].presumed_offset = -1;
		store[count].offset = sizeof(uint32_t) * (i + 1);
		store[count].delta = sizeof(uint32_t) * count;
		store[count].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		store[count].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			batch[++i] = 0;
			batch[++i] = 0;
		} else if (gen >= 4) {
			batch[++i] = 0;
			batch[++i] = 0;
			store[count].offset += sizeof(uint32_t);
		} else {
			batch[i]--;
			batch[++i] = 0;
		}
		batch[++i] = count;
		i++;
	}

	bbe = &batch[i];
	store[count].target_handle = obj[BATCH].handle; /* recurse */
	store[count].presumed_offset = 0;
	store[count].offset = sizeof(uint32_t) * (i + 1);
	store[count].delta = 0;
	store[count].read_domains = I915_GEM_DOMAIN_COMMAND;
	store[count].write_domain = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			store[count].delta = 1;
		}
	}
	i++;
	igt_assert(i < size/sizeof(*batch));
	igt_require(__gem_execbuf(i915, &execbuf) == 0);
	gem_close(i915, obj[BATCH].handle);
	gem_close(i915, obj[SCRATCH].handle);

	write_busy = prime_busy(dmabuf, false);
	read_busy = prime_busy(dmabuf, true);

	*bbe = MI_BATCH_BUFFER_END;
	__sync_synchronize();
	munmap(batch, size);

	igt_assert(read_busy && write_busy);
}

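/*
 * test_busy: after submitting work(), busy-wait on the dma-buf with
 * non-blocking polls (bounded at 10s), then verify the GPU stores are
 * visible through the vgem mapping.
 */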
static void test_busy(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	struct timespec tv;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);

	work(i915, dmabuf, ring, flags);

	/* Calling busy in a loop should be enough to flush the rendering */
	memset(&tv, 0, sizeof(tv));
	while (prime_busy(dmabuf, false))
		igt_assert(igt_seconds_elapsed(&tv) < 10);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	munmap(ptr, 4096);

	gem_close(vgem, scratch.handle);
	close(dmabuf);
}

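/*
 * test_wait: same as test_busy, but block in a single poll() with a 10s
 * timeout instead of spinning, checking that the dma-buf wakes us when
 * the render completes.
 */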
static void test_wait(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	struct pollfd pfd;
	uint32_t *ptr;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	pfd.fd = prime_handle_to_fd(vgem, scratch.handle);

	work(i915, pfd.fd, ring, flags);

	pfd.events = POLLIN;
	igt_assert_eq(poll(&pfd, 1, 10000), 1);

	ptr = vgem_mmap(vgem, &scratch, PROT_READ);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	munmap(ptr, 4096);

	gem_close(vgem, scratch.handle);
	close(pfd.fd);
}

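/*
 * test_sync: wait for the render by bracketing the readback in
 * prime_sync_start/end (DMA_BUF_IOCTL_SYNC) on a direct mmap of the
 * dma-buf, rather than polling.
 */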
static void test_sync(int i915, int vgem, unsigned ring, uint32_t flags)
{
	struct vgem_bo scratch;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);
	gem_close(vgem, scratch.handle);

	work(i915, dmabuf, ring, flags);

	prime_sync_start(dmabuf, false);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);

	prime_sync_end(dmabuf, false);
	close(dmabuf);

	munmap(ptr, scratch.size);
}

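/*
 * test_fence_wait: attach a vgem write fence before submitting work(),
 * so the GPU batch must stall behind it. Verify the scratch stays
 * zeroed while the fence is unsignalled, then signal it and check the
 * render completes.
 */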
static void test_fence_wait(int i915, int vgem, unsigned ring, unsigned flags)
{
	struct vgem_bo scratch;
	uint32_t fence;
	uint32_t *ptr;
	int dmabuf;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);

	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
	igt_assert(prime_busy(dmabuf, false));
	gem_close(vgem, scratch.handle);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);

	igt_fork(child, 1)
		work(i915, dmabuf, ring, flags);

	sleep(1);

	/* Check for invalidly completing the task early */
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], 0);

	igt_assert(prime_busy(dmabuf, false));
	vgem_fence_signal(vgem, fence);
	igt_waitchildren();

	/* But after signaling and waiting, it should be done */
	prime_sync_start(dmabuf, false);
	for (int i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], i);
	prime_sync_end(dmabuf, false);

	close(dmabuf);

	munmap(ptr, scratch.size);
}

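/*
 * test_fence_hang: attach a fence that never signals
 * (WIP_VGEM_FENCE_NOTIMEOUT) and check that the batch stuck behind it
 * is declared hung and cancelled without ever writing to the scratch.
 */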
static void test_fence_hang(int i915, int vgem, unsigned flags)
{
	struct vgem_bo scratch;
	uint32_t *ptr;
	int dmabuf;
	int i;

	scratch.width = 1024;
	scratch.height = 1;
	scratch.bpp = 32;
	vgem_create(vgem, &scratch);
	dmabuf = prime_handle_to_fd(vgem, scratch.handle);
	vgem_fence_attach(vgem, &scratch, flags | WIP_VGEM_FENCE_NOTIMEOUT);

	ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
	igt_assert(ptr != MAP_FAILED);
	gem_close(vgem, scratch.handle);

	work(i915, dmabuf, I915_EXEC_DEFAULT, 0);

	/* The work should have been cancelled */

	prime_sync_start(dmabuf, false);
	for (i = 0; i < 1024; i++)
		igt_assert_eq_u32(ptr[i], 0);
	prime_sync_end(dmabuf, false);
	close(dmabuf);

	munmap(ptr, scratch.size);
}

static bool has_prime_export(int fd)
{
	uint64_t value;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value))
		return false;

	return value & DRM_PRIME_CAP_EXPORT;
}

static bool has_prime_import(int fd)
{
	uint64_t value;

	if (drmGetCap(fd, DRM_CAP_PRIME, &value))
		return false;

	return value & DRM_PRIME_CAP_IMPORT;
}

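/*
 * set_fb_on_crtc: minimal modeset using raw ioctls. Walk the connectors
 * for one with an encoder that can drive the requested pipe and a mode
 * matching the bo dimensions, then light it up with the given
 * framebuffer. Returns the crtc id, or 0 if nothing suitable is found.
 */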
static uint32_t set_fb_on_crtc(int fd, int pipe, struct vgem_bo *bo, uint32_t fb_id)
{
	drmModeRes *resources = drmModeGetResources(fd);
	struct drm_mode_modeinfo *modes = malloc(4096*sizeof(*modes));
	uint32_t encoders[32];

	for (int o = 0; o < resources->count_connectors; o++) {
		struct drm_mode_get_connector conn;
		struct drm_mode_crtc set;
		int e, m;

		memset(&conn, 0, sizeof(conn));
		conn.connector_id = resources->connectors[o];
		conn.count_modes = 4096;
		conn.modes_ptr = (uintptr_t)modes;
		conn.count_encoders = 32;
		conn.encoders_ptr = (uintptr_t)encoders;

		drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);

		for (e = 0; e < conn.count_encoders; e++) {
			struct drm_mode_get_encoder enc;

			memset(&enc, 0, sizeof(enc));
			enc.encoder_id = encoders[e];
			drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc);
			if (enc.possible_crtcs & (1 << pipe))
				break;
		}
		if (e == conn.count_encoders)
			continue;

		for (m = 0; m < conn.count_modes; m++) {
			if (modes[m].hdisplay == bo->width &&
			    modes[m].vdisplay == bo->height)
				break;
		}
		if (m == conn.count_modes)
			continue;

		memset(&set, 0, sizeof(set));
		set.crtc_id = resources->crtcs[pipe];
		set.fb_id = fb_id;
		set.set_connectors_ptr = (uintptr_t)&conn.connector_id;
		set.count_connectors = 1;
		set.mode = modes[m];
		set.mode_valid = 1;
		if (drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &set) == 0) {
			drmModeFreeResources(resources);
			free(modes);
			return set.crtc_id;
		}
	}

	drmModeFreeResources(resources);
	free(modes);
	return 0;
}

static inline uint32_t pipe_select(int pipe)
{
	if (pipe > 1)
		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
	else if (pipe > 0)
		return DRM_VBLANK_SECONDARY;
	else
		return 0;
}

static unsigned get_vblank(int fd, int pipe, unsigned flags)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe) | flags;
	if (drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
		return 0;

	return vbl.reply.sequence;
}

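/*
 * test_flip: scan out the imported vgem bo and schedule a page flip
 * with a vgem write fence attached. The flip must not complete while
 * the fence is unsignalled; once signalled (or, in the hang case, once
 * the kernel gives up on the stuck fence), the flip event must arrive.
 */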
static void test_flip(int i915, int vgem, unsigned hang)
{
	struct drm_event_vblank vbl;
	uint32_t fb_id, crtc_id;
	uint32_t handle, fence;
	struct pollfd pfd;
	struct vgem_bo bo;

	bo.width = 1024;
	bo.height = 768;
	bo.bpp = 32;
	vgem_create(vgem, &bo);

	pfd.fd = prime_handle_to_fd(vgem, bo.handle);
	handle = prime_fd_to_handle(i915, pfd.fd);
	igt_assert(handle);
	close(pfd.fd);

	do_or_die(__kms_addfb(i915, handle, bo.width, bo.height, bo.pitch,
			      DRM_FORMAT_XRGB8888, I915_TILING_NONE,
			      LOCAL_DRM_MODE_FB_MODIFIERS, &fb_id));
	igt_assert(fb_id);
	igt_require((crtc_id = set_fb_on_crtc(i915, 0, &bo, fb_id)));

	/* Schedule a flip to wait upon vgem being written */
	fence = vgem_fence_attach(vgem, &bo, VGEM_FENCE_WRITE | hang);
	do_or_die(drmModePageFlip(i915, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT, &fb_id));

	/* Check we don't flip before the fence is ready */
	pfd.fd = i915;
	pfd.events = POLLIN;
	for (int n = 0; n < 5; n++) {
		igt_assert_eq(poll(&pfd, 1, 0), 0);
		get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
	}

	/* And then the flip is completed as soon as it is ready */
	if (!hang) {
		vgem_fence_signal(vgem, fence);
		get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
		igt_assert_eq(poll(&pfd, 1, 0), 1);
	}
	/* Even if hung, the flip must complete *eventually* */
	igt_set_timeout(20, "Ignored hang"); /* XXX lower fail threshold? */
	igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
	igt_reset_timeout();

	do_or_die(drmModeRmFB(i915, fb_id));
	gem_close(i915, handle);
	gem_close(vgem, bo.handle);
}

igt_main
{
	const struct intel_execution_engine *e;
	int i915 = -1;
	int vgem = -1;
	int gen = 0;

	igt_skip_on_simulation();

	igt_fixture {
		vgem = drm_open_driver(DRIVER_VGEM);
		igt_require(has_prime_export(vgem));

		i915 = drm_open_driver_master(DRIVER_INTEL);
		igt_require(has_prime_import(i915));
		gem_require_mmap_wc(i915);
		gen = intel_gen(intel_get_drm_devid(i915));
	}

	igt_subtest("basic-read")
		test_read(vgem, i915);

	igt_subtest("basic-write")
		test_write(vgem, i915);

	igt_subtest("basic-gtt")
		test_gtt(vgem, i915);

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%ssync-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_sync(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%sbusy-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_busy(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%swait-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_wait(i915, vgem, e->exec_id, e->flags);
		}
	}

	/* Fence testing */
	igt_subtest_group {
		igt_fixture {
			igt_require(vgem_has_fences(vgem));
		}

		igt_subtest("basic-fence-read")
			test_fence_read(i915, vgem);

		for (e = intel_execution_engines; e->name; e++) {
			igt_subtest_f("%sfence-wait-%s",
				      e->exec_id == 0 ? "basic-" : "",
				      e->name) {
				gem_require_ring(i915, e->exec_id | e->flags);
				igt_skip_on_f(gen == 6 &&
					      e->exec_id == I915_EXEC_BSD,
					      "MI_STORE_DATA broken on gen6 bsd\n");
				gem_quiescent_gpu(i915);
				test_fence_wait(i915, vgem, e->exec_id, e->flags);
			}
		}

		igt_subtest("basic-fence-flip")
			test_flip(i915, vgem, 0);

		igt_subtest_group {
			igt_fixture {
				igt_require(vgem_fence_has_flag(vgem, WIP_VGEM_FENCE_NOTIMEOUT));
			}

			igt_subtest("fence-read-hang")
				test_fence_hang(i915, vgem, 0);
			igt_subtest("fence-write-hang")
				test_fence_hang(i915, vgem, VGEM_FENCE_WRITE);

			igt_subtest("fence-flip-hang")
				test_flip(i915, vgem, WIP_VGEM_FENCE_NOTIMEOUT);
		}
	}

	igt_fixture {
		close(i915);
		close(vgem);
	}
}