blob: ae69965b484490cb0c5c9880facc37792d7acbd2 [file] [log] [blame]
Chris Wilsonc3440442016-06-18 00:42:19 +01001/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "igt.h"
25#include "igt_vgem.h"
26
27#include <sys/poll.h>
28
/* One-line test summary registered with the IGT framework. */
IGT_TEST_DESCRIPTION("Basic check of polling for prime/vgem fences.");
30
31static void test_read(int vgem, int i915)
32{
33 struct vgem_bo scratch;
34 uint32_t handle;
35 uint32_t *ptr;
36 int dmabuf, i;
37
38 scratch.width = 1024;
39 scratch.height = 1024;
40 scratch.bpp = 32;
41 vgem_create(vgem, &scratch);
42
43 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
44 handle = prime_fd_to_handle(i915, dmabuf);
45 close(dmabuf);
46
47 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
48 for (i = 0; i < 1024; i++)
49 ptr[1024*i] = i;
50 munmap(ptr, scratch.size);
Chris Wilson0e1f5e32016-06-20 13:27:17 +010051 gem_close(vgem, scratch.handle);
Chris Wilsonc3440442016-06-18 00:42:19 +010052
53 for (i = 0; i < 1024; i++) {
54 uint32_t tmp;
55 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
56 igt_assert_eq(tmp, i);
57 }
Chris Wilsonc3440442016-06-18 00:42:19 +010058 gem_close(i915, handle);
Chris Wilsonc3440442016-06-18 00:42:19 +010059}
60
Chris Wilsonec194972016-07-19 10:30:54 +010061static void test_fence_read(int i915, int vgem)
Chris Wilson8cca1102016-07-18 10:25:24 +010062{
63 struct vgem_bo scratch;
64 uint32_t handle;
65 uint32_t *ptr;
66 uint32_t fence;
67 int dmabuf, i;
Chris Wilson2cc78a92016-07-19 10:50:51 +010068 int master[2], slave[2];
69
70 igt_assert(pipe(master) == 0);
71 igt_assert(pipe(slave) == 0);
Chris Wilson8cca1102016-07-18 10:25:24 +010072
73 scratch.width = 1024;
74 scratch.height = 1024;
75 scratch.bpp = 32;
76 vgem_create(vgem, &scratch);
77
78 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
79 handle = prime_fd_to_handle(i915, dmabuf);
80 close(dmabuf);
81
Chris Wilson8cca1102016-07-18 10:25:24 +010082 igt_fork(child, 1) {
83 for (i = 0; i < 1024; i++) {
84 uint32_t tmp;
85 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
Chris Wilson2cc78a92016-07-19 10:50:51 +010086 igt_assert_eq(tmp, 0);
87 }
88 write(master[1], &child, sizeof(child));
89 read(slave[0], &child, sizeof(child));
90 for (i = 0; i < 1024; i++) {
91 uint32_t tmp;
92 gem_read(i915, handle, 4096*i, &tmp, sizeof(tmp));
Chris Wilson8cca1102016-07-18 10:25:24 +010093 igt_assert_eq(tmp, i);
94 }
95 gem_close(i915, handle);
96 }
97
Chris Wilson2cc78a92016-07-19 10:50:51 +010098 read(master[0], &i, sizeof(i));
99 fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
100 write(slave[1], &i, sizeof(i));
Chris Wilsonec194972016-07-19 10:30:54 +0100101
Chris Wilson8cca1102016-07-18 10:25:24 +0100102 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
103 for (i = 0; i < 1024; i++)
104 ptr[1024*i] = i;
105 munmap(ptr, scratch.size);
106 vgem_fence_signal(vgem, fence);
107 gem_close(vgem, scratch.handle);
108
109 igt_waitchildren();
Chris Wilson2cc78a92016-07-19 10:50:51 +0100110 close(master[0]);
111 close(master[1]);
112 close(slave[0]);
113 close(slave[1]);
114}
115
116static void test_fence_mmap(int i915, int vgem)
117{
118 struct vgem_bo scratch;
119 uint32_t handle;
120 uint32_t *ptr;
121 uint32_t fence;
122 int dmabuf, i;
123 int master[2], slave[2];
124
125 igt_assert(pipe(master) == 0);
126 igt_assert(pipe(slave) == 0);
127
128 scratch.width = 1024;
129 scratch.height = 1024;
130 scratch.bpp = 32;
131 vgem_create(vgem, &scratch);
132
133 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
134 handle = prime_fd_to_handle(i915, dmabuf);
135 close(dmabuf);
136
137 igt_fork(child, 1) {
138 ptr = gem_mmap__gtt(i915, handle, 4096*1024, PROT_READ);
Chris Wilson2cc78a92016-07-19 10:50:51 +0100139
Chris Wilson4d034672016-07-19 10:54:54 +0100140 gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
Chris Wilson2cc78a92016-07-19 10:50:51 +0100141 for (i = 0; i < 1024; i++)
142 igt_assert_eq(ptr[1024*i], 0);
143
144 write(master[1], &child, sizeof(child));
145 read(slave[0], &child, sizeof(child));
146
Chris Wilson4d034672016-07-19 10:54:54 +0100147 gem_set_domain(i915, handle, I915_GEM_DOMAIN_GTT, 0);
Chris Wilson2cc78a92016-07-19 10:50:51 +0100148 for (i = 0; i < 1024; i++)
149 igt_assert_eq(ptr[1024*i], i);
Chris Wilson4d034672016-07-19 10:54:54 +0100150
151 gem_close(i915, handle);
Chris Wilson2cc78a92016-07-19 10:50:51 +0100152 }
153
154 read(master[0], &i, sizeof(i));
155 fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
156 write(slave[1], &i, sizeof(i));
157
158 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
159 for (i = 0; i < 1024; i++)
160 ptr[1024*i] = i;
161 munmap(ptr, scratch.size);
162 vgem_fence_signal(vgem, fence);
163 gem_close(vgem, scratch.handle);
164
165 igt_waitchildren();
166 close(master[0]);
167 close(master[1]);
168 close(slave[0]);
169 close(slave[1]);
Chris Wilson8cca1102016-07-18 10:25:24 +0100170}
171
Chris Wilsonc3440442016-06-18 00:42:19 +0100172static void test_write(int vgem, int i915)
173{
174 struct vgem_bo scratch;
175 uint32_t handle;
176 uint32_t *ptr;
177 int dmabuf, i;
178
179 scratch.width = 1024;
180 scratch.height = 1024;
181 scratch.bpp = 32;
182 vgem_create(vgem, &scratch);
183
184 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
185 handle = prime_fd_to_handle(i915, dmabuf);
186 close(dmabuf);
187
Chris Wilson0e1f5e32016-06-20 13:27:17 +0100188 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
189 gem_close(vgem, scratch.handle);
190
Chris Wilsonc3440442016-06-18 00:42:19 +0100191 for (i = 0; i < 1024; i++)
192 gem_write(i915, handle, 4096*i, &i, sizeof(i));
Chris Wilson0e1f5e32016-06-20 13:27:17 +0100193 gem_close(i915, handle);
Chris Wilsonc3440442016-06-18 00:42:19 +0100194
Chris Wilsonc3440442016-06-18 00:42:19 +0100195 for (i = 0; i < 1024; i++)
196 igt_assert_eq(ptr[1024*i], i);
197 munmap(ptr, scratch.size);
Chris Wilsonc3440442016-06-18 00:42:19 +0100198}
199
200static void test_gtt(int vgem, int i915)
201{
202 struct vgem_bo scratch;
203 uint32_t handle;
Chris Wilson50200292016-06-20 20:11:37 +0100204 uint32_t *ptr, *gtt;
Chris Wilsonc3440442016-06-18 00:42:19 +0100205 int dmabuf, i;
206
207 scratch.width = 1024;
208 scratch.height = 1024;
209 scratch.bpp = 32;
210 vgem_create(vgem, &scratch);
211
212 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
213 handle = prime_fd_to_handle(i915, dmabuf);
214 close(dmabuf);
215
216 ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
217 for (i = 0; i < 1024; i++)
218 ptr[1024*i] = i;
219 munmap(ptr, scratch.size);
220
221 ptr = vgem_mmap(vgem, &scratch, PROT_READ | PROT_WRITE);
222 for (i = 0; i < 1024; i++) {
223 igt_assert_eq(ptr[1024*i], i);
224 ptr[1024*i] = ~i;
225 }
226 munmap(ptr, scratch.size);
227
228 ptr = gem_mmap__gtt(i915, handle, scratch.size, PROT_READ);
229 for (i = 0; i < 1024; i++)
230 igt_assert_eq(ptr[1024*i], ~i);
231 munmap(ptr, scratch.size);
232
Chris Wilson50200292016-06-20 20:11:37 +0100233 ptr = vgem_mmap(vgem, &scratch, PROT_WRITE);
234 gtt = gem_mmap__gtt(i915, handle, scratch.size, PROT_WRITE);
Chris Wilsona153f952016-06-20 21:50:07 +0100235#if defined(__x86_64__)
Chris Wilson50200292016-06-20 20:11:37 +0100236 for (i = 0; i < 1024; i++) {
237 gtt[1024*i] = i;
Chris Wilsona153f952016-06-20 21:50:07 +0100238 __builtin_ia32_sfence();
Chris Wilson50200292016-06-20 20:11:37 +0100239 igt_assert_eq(ptr[1024*i], i);
240 ptr[1024*i] = ~i;
Chris Wilsona153f952016-06-20 21:50:07 +0100241 __builtin_ia32_sfence();
Chris Wilson50200292016-06-20 20:11:37 +0100242 igt_assert_eq(gtt[1024*i], ~i);
243 }
Chris Wilsona153f952016-06-20 21:50:07 +0100244#endif
Chris Wilson50200292016-06-20 20:11:37 +0100245 munmap(gtt, scratch.size);
246 munmap(ptr, scratch.size);
247
Chris Wilsonc3440442016-06-18 00:42:19 +0100248 gem_close(i915, handle);
249 gem_close(vgem, scratch.handle);
250}
251
/*
 * Non-blocking busyness query on a dma-buf fd via poll().
 * excl=true polls POLLOUT (would an exclusive/write access block?);
 * excl=false polls POLLIN (would a shared/read access block?).
 * Returns true when the fd is not yet ready, i.e. still busy.
 */
static bool prime_busy(int fd, bool excl)
{
	struct pollfd pfd;

	pfd.fd = fd;
	pfd.events = excl ? POLLOUT : POLLIN;
	pfd.revents = 0;

	/* Zero timeout: 0 means no fd ready, hence still busy. */
	return poll(&pfd, 1, 0) == 0;
}
257
/*
 * Submit a self-referencing batch on the given engine that stores an
 * incrementing dword into each of the first 1024 dwords of the shared
 * dma-buf, then loops back to its own start (MI_BATCH_BUFFER_START into
 * itself) so the request stays busy until we overwrite the jump with
 * MI_BATCH_BUFFER_END. While the batch spins, both the shared (POLLIN)
 * and exclusive (POLLOUT) poll checks on the dma-buf must report busy.
 */
static void work(int i915, int dmabuf, unsigned ring, uint32_t flags)
{
	const int SCRATCH = 0;
	const int BATCH = 1;
	const int gen = intel_gen(intel_get_drm_devid(i915));
	struct drm_i915_gem_exec_object2 obj[2];
	/* 1024 scratch stores plus the final self-referencing jump. */
	struct drm_i915_gem_relocation_entry store[1024+1];
	struct drm_i915_gem_execbuffer2 execbuf;
	unsigned size = ALIGN(ARRAY_SIZE(store)*16 + 4, 4096);
	bool read_busy, write_busy;
	uint32_t *batch, *bbe;
	int i, count;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)obj;
	execbuf.buffer_count = 2;
	execbuf.flags = ring | flags;
	/* NOTE(review): pre-gen6 appears to need a secure batch for these
	 * store commands — confirm against the MI_STORE_DWORD_IMM docs. */
	if (gen < 6)
		execbuf.flags |= I915_EXEC_SECURE;

	memset(obj, 0, sizeof(obj));
	obj[SCRATCH].handle = prime_fd_to_handle(i915, dmabuf);

	obj[BATCH].handle = gem_create(i915, size);
	obj[BATCH].relocs_ptr = (uintptr_t)store;
	obj[BATCH].relocation_count = ARRAY_SIZE(store);
	memset(store, 0, sizeof(store));

	batch = gem_mmap__wc(i915, obj[BATCH].handle, 0, size, PROT_WRITE);
	gem_set_domain(i915, obj[BATCH].handle,
		       I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);

	/* Emit 1024 x MI_STORE_DWORD_IMM: scratch dword[count] = count.
	 * The command length and address layout vary per generation. */
	i = 0;
	for (count = 0; count < 1024; count++) {
		store[count].target_handle = obj[SCRATCH].handle;
		store[count].presumed_offset = -1;
		store[count].offset = sizeof(uint32_t) * (i + 1);
		store[count].delta = sizeof(uint32_t) * count;
		store[count].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		store[count].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
		batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
		if (gen >= 8) {
			/* 64-bit address: two dwords. */
			batch[++i] = 0;
			batch[++i] = 0;
		} else if (gen >= 4) {
			/* MBZ dword then 32-bit address. */
			batch[++i] = 0;
			batch[++i] = 0;
			store[count].offset += sizeof(uint32_t);
		} else {
			/* Shorter command: shrink length field by one. */
			batch[i]--;
			batch[++i] = 0;
		}
		batch[++i] = count;
		i++;
	}

	/* Terminate with a jump back to the start of this very batch, so
	 * it spins until *bbe is replaced by MI_BATCH_BUFFER_END below. */
	bbe = &batch[i];
	store[count].target_handle = obj[BATCH].handle; /* recurse */
	store[count].presumed_offset = 0;
	store[count].offset = sizeof(uint32_t) * (i + 1);
	store[count].delta = 0;
	store[count].read_domains = I915_GEM_DOMAIN_COMMAND;
	store[count].write_domain = 0;
	batch[i] = MI_BATCH_BUFFER_START;
	if (gen >= 8) {
		batch[i] |= 1 << 8 | 1;
		batch[++i] = 0;
		batch[++i] = 0;
	} else if (gen >= 6) {
		batch[i] |= 1 << 8;
		batch[++i] = 0;
	} else {
		batch[i] |= 2 << 6;
		batch[++i] = 0;
		if (gen < 4) {
			batch[i] |= 1;
			store[count].delta = 1;
		}
	}
	i++;
	igt_assert(i < size/sizeof(*batch));
	igt_require(__gem_execbuf(i915, &execbuf) == 0);
	gem_close(i915, obj[BATCH].handle);
	gem_close(i915, obj[SCRATCH].handle);

	/* Sample busyness while the batch is still spinning. */
	write_busy = prime_busy(dmabuf, false);
	read_busy = prime_busy(dmabuf, true);

	/* Now let the batch complete. */
	*bbe = MI_BATCH_BUFFER_END;
	__sync_synchronize();
	munmap(batch, size);

	igt_assert(read_busy && write_busy);
}
352
353static void test_busy(int i915, int vgem, unsigned ring, uint32_t flags)
354{
355 struct vgem_bo scratch;
356 struct timespec tv;
357 uint32_t *ptr;
358 int dmabuf;
359 int i;
360
361 scratch.width = 1024;
362 scratch.height = 1;
363 scratch.bpp = 32;
364 vgem_create(vgem, &scratch);
365 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
366
367 work(i915, dmabuf, ring, flags);
368
369 /* Calling busy in a loop should be enough to flush the rendering */
370 memset(&tv, 0, sizeof(tv));
371 while (prime_busy(dmabuf, false))
372 igt_assert(igt_seconds_elapsed(&tv) < 10);
373
374 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
375 for (i = 0; i < 1024; i++)
376 igt_assert_eq_u32(ptr[i], i);
377 munmap(ptr, 4096);
378
379 gem_close(vgem, scratch.handle);
380 close(dmabuf);
381}
382
383static void test_wait(int i915, int vgem, unsigned ring, uint32_t flags)
384{
385 struct vgem_bo scratch;
386 struct pollfd pfd;
387 uint32_t *ptr;
388 int i;
389
390 scratch.width = 1024;
391 scratch.height = 1;
392 scratch.bpp = 32;
393 vgem_create(vgem, &scratch);
394 pfd.fd = prime_handle_to_fd(vgem, scratch.handle);
395
396 work(i915, pfd.fd, ring, flags);
397
398 pfd.events = POLLIN;
399 igt_assert_eq(poll(&pfd, 1, 10000), 1);
400
401 ptr = vgem_mmap(vgem, &scratch, PROT_READ);
402 for (i = 0; i < 1024; i++)
403 igt_assert_eq_u32(ptr[i], i);
404 munmap(ptr, 4096);
405
406 gem_close(vgem, scratch.handle);
407 close(pfd.fd);
408}
409
410static void test_sync(int i915, int vgem, unsigned ring, uint32_t flags)
411{
412 struct vgem_bo scratch;
413 uint32_t *ptr;
414 int dmabuf;
415 int i;
416
417 scratch.width = 1024;
418 scratch.height = 1;
419 scratch.bpp = 32;
420 vgem_create(vgem, &scratch);
421 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
422
Chris Wilson93256e32016-06-22 07:21:09 +0100423 ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
424 igt_assert(ptr != MAP_FAILED);
425 gem_close(vgem, scratch.handle);
426
Chris Wilsonc3440442016-06-18 00:42:19 +0100427 work(i915, dmabuf, ring, flags);
428
429 prime_sync_start(dmabuf, false);
Chris Wilsonc3440442016-06-18 00:42:19 +0100430 for (i = 0; i < 1024; i++)
431 igt_assert_eq_u32(ptr[i], i);
Chris Wilsonc3440442016-06-18 00:42:19 +0100432
433 prime_sync_end(dmabuf, false);
Chris Wilsonc3440442016-06-18 00:42:19 +0100434 close(dmabuf);
Chris Wilson93256e32016-06-22 07:21:09 +0100435
436 munmap(ptr, scratch.size);
437}
438
439static void test_fence_wait(int i915, int vgem, unsigned ring, unsigned flags)
440{
441 struct vgem_bo scratch;
442 uint32_t fence;
443 uint32_t *ptr;
444 int dmabuf;
445
446 scratch.width = 1024;
447 scratch.height = 1;
448 scratch.bpp = 32;
449 vgem_create(vgem, &scratch);
450
451 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100452 fence = vgem_fence_attach(vgem, &scratch, VGEM_FENCE_WRITE);
Chris Wilson93256e32016-06-22 07:21:09 +0100453 igt_assert(prime_busy(dmabuf, false));
454 gem_close(vgem, scratch.handle);
455
456 ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
457 igt_assert(ptr != MAP_FAILED);
458
459 igt_fork(child, 1)
460 work(i915, dmabuf, ring, flags);
461
462 sleep(1);
463
464 /* Check for invalidly completing the task early */
465 for (int i = 0; i < 1024; i++)
466 igt_assert_eq_u32(ptr[i], 0);
467
468 igt_assert(prime_busy(dmabuf, false));
469 vgem_fence_signal(vgem, fence);
470 igt_waitchildren();
471
472 /* But after signaling and waiting, it should be done */
473 prime_sync_start(dmabuf, false);
474 for (int i = 0; i < 1024; i++)
475 igt_assert_eq_u32(ptr[i], i);
476 prime_sync_end(dmabuf, false);
477
478 close(dmabuf);
479
480 munmap(ptr, scratch.size);
481}
482
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100483static void test_fence_hang(int i915, int vgem, unsigned flags)
Chris Wilson93256e32016-06-22 07:21:09 +0100484{
485 struct vgem_bo scratch;
486 uint32_t *ptr;
487 int dmabuf;
488 int i;
489
490 scratch.width = 1024;
491 scratch.height = 1;
492 scratch.bpp = 32;
493 vgem_create(vgem, &scratch);
494 dmabuf = prime_handle_to_fd(vgem, scratch.handle);
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100495 vgem_fence_attach(vgem, &scratch, flags | WIP_VGEM_FENCE_NOTIMEOUT);
Chris Wilson93256e32016-06-22 07:21:09 +0100496
497 ptr = mmap(NULL, scratch.size, PROT_READ, MAP_SHARED, dmabuf, 0);
498 igt_assert(ptr != MAP_FAILED);
499 gem_close(vgem, scratch.handle);
500
501 work(i915, dmabuf, I915_EXEC_DEFAULT, 0);
502
503 /* The work should have been cancelled */
504
505 prime_sync_start(dmabuf, false);
506 for (i = 0; i < 1024; i++)
507 igt_assert_eq_u32(ptr[i], 0);
508 prime_sync_end(dmabuf, false);
509 close(dmabuf);
510
511 munmap(ptr, scratch.size);
Chris Wilsonc3440442016-06-18 00:42:19 +0100512}
513
514static bool has_prime_export(int fd)
515{
516 uint64_t value;
517
518 if (drmGetCap(fd, DRM_CAP_PRIME, &value))
519 return false;
520
521 return value & DRM_PRIME_CAP_EXPORT;
522}
523
524static bool has_prime_import(int fd)
525{
526 uint64_t value;
527
528 if (drmGetCap(fd, DRM_CAP_PRIME, &value))
529 return false;
530
531 return value & DRM_PRIME_CAP_IMPORT;
532}
533
Chris Wilson93256e32016-06-22 07:21:09 +0100534static uint32_t set_fb_on_crtc(int fd, int pipe, struct vgem_bo *bo, uint32_t fb_id)
535{
536 drmModeRes *resources = drmModeGetResources(fd);
537 struct drm_mode_modeinfo *modes = malloc(4096*sizeof(*modes));
538 uint32_t encoders[32];
539
540 for (int o = 0; o < resources->count_connectors; o++) {
541 struct drm_mode_get_connector conn;
542 struct drm_mode_crtc set;
543 int e, m;
544
545 memset(&conn, 0, sizeof(conn));
546 conn.connector_id = resources->connectors[o];
547 conn.count_modes = 4096;
548 conn.modes_ptr = (uintptr_t)modes;
549 conn.count_encoders = 32;
550 conn.encoders_ptr = (uintptr_t)encoders;
551
552 drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn);
553
554 for (e = 0; e < conn.count_encoders; e++) {
555 struct drm_mode_get_encoder enc;
556
557 memset(&enc, 0, sizeof(enc));
558 enc.encoder_id = encoders[e];
559 drmIoctl(fd, DRM_IOCTL_MODE_GETENCODER, &enc);
560 if (enc.possible_crtcs & (1 << pipe))
561 break;
562 }
563 if (e == conn.count_encoders)
564 continue;
565
566 for (m = 0; m < conn.count_modes; m++) {
567 if (modes[m].hdisplay == bo->width &&
568 modes[m].vdisplay == bo->height)
569 break;
570 }
571 if (m == conn.count_modes)
572 continue;
573
574 memset(&set, 0, sizeof(set));
575 set.crtc_id = resources->crtcs[pipe];
576 set.fb_id = fb_id;
577 set.set_connectors_ptr = (uintptr_t)&conn.connector_id;
578 set.count_connectors = 1;
579 set.mode = modes[m];
580 set.mode_valid = 1;
581 if (drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &set) == 0) {
582 drmModeFreeResources(resources);
583 return set.crtc_id;
584 }
585 }
586
587 drmModeFreeResources(resources);
588 return 0;
589}
590
591static inline uint32_t pipe_select(int pipe)
592{
593 if (pipe > 1)
594 return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
595 else if (pipe > 0)
596 return DRM_VBLANK_SECONDARY;
597 else
598 return 0;
599}
600
601static unsigned get_vblank(int fd, int pipe, unsigned flags)
602{
603 union drm_wait_vblank vbl;
604
605 memset(&vbl, 0, sizeof(vbl));
606 vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe) | flags;
607 if (drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
608 return 0;
609
610 return vbl.reply.sequence;
611}
612
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100613static void test_flip(int i915, int vgem, unsigned hang)
Chris Wilson93256e32016-06-22 07:21:09 +0100614{
615 struct drm_event_vblank vbl;
616 uint32_t fb_id, crtc_id;
617 uint32_t handle, fence;
618 struct pollfd pfd;
619 struct vgem_bo bo;
620
621 bo.width = 1024;
622 bo.height = 768;
623 bo.bpp = 32;
624 vgem_create(vgem, &bo);
625
626 pfd.fd = prime_handle_to_fd(vgem, bo.handle);
627 handle = prime_fd_to_handle(i915, pfd.fd);
628 igt_assert(handle);
629 close(pfd.fd);
630
631 do_or_die(__kms_addfb(i915, handle, bo.width, bo.height, bo.pitch,
632 DRM_FORMAT_XRGB8888, I915_TILING_NONE,
633 LOCAL_DRM_MODE_FB_MODIFIERS, &fb_id));
634 igt_assert(fb_id);
635 igt_require((crtc_id = set_fb_on_crtc(i915, 0, &bo, fb_id)));
636
637 /* Schedule a flip to wait upon vgem being written */
Chris Wilsonc8ab5772016-07-15 09:01:59 +0100638 fence = vgem_fence_attach(vgem, &bo, VGEM_FENCE_WRITE | hang);
Chris Wilson93256e32016-06-22 07:21:09 +0100639 do_or_die(drmModePageFlip(i915, crtc_id, fb_id,
640 DRM_MODE_PAGE_FLIP_EVENT, &fb_id));
641
642 /* Check we don't flip before the fence is ready */
643 pfd.fd = i915;
644 pfd.events = POLLIN;
645 for (int n = 0; n < 5; n++) {
646 igt_assert_eq(poll(&pfd, 1, 0), 0);
647 get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
648 }
649
650 /* And then the flip is completed as soon as it is ready */
651 if (!hang) {
652 vgem_fence_signal(vgem, fence);
653 get_vblank(i915, 0, DRM_VBLANK_NEXTONMISS);
654 igt_assert_eq(poll(&pfd, 1, 0), 1);
655 }
656 /* Even if hung, the flip must complete *eventually* */
657 igt_set_timeout(20, "Ignored hang"); /* XXX lower fail threshold? */
658 igt_assert_eq(read(i915, &vbl, sizeof(vbl)), sizeof(vbl));
659 igt_reset_timeout();
660
661 do_or_die(drmModeRmFB(i915, fb_id));
662 gem_close(i915, handle);
663 gem_close(vgem, bo.handle);
664}
665
igt_main
{
	const struct intel_execution_engine *e;
	int i915 = -1;
	int vgem = -1;
	int gen = 0;

	igt_skip_on_simulation();

	/* Open both drivers and confirm dma-buf export/import support. */
	igt_fixture {
		vgem = drm_open_driver(DRIVER_VGEM);
		igt_require(has_prime_export(vgem));

		i915 = drm_open_driver_master(DRIVER_INTEL);
		igt_require(has_prime_import(i915));
		gem_require_mmap_wc(i915);
		gen = intel_gen(intel_get_drm_devid(i915));
	}

	/* Plain data-transfer coherency tests, no fences involved. */
	igt_subtest("basic-read")
		test_read(vgem, i915);

	igt_subtest("basic-write")
		test_write(vgem, i915);

	igt_subtest("basic-gtt")
		test_gtt(vgem, i915);

	/* Per-engine sync/busy/wait checks against GPU work. */
	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%ssync-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_sync(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%sbusy-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_busy(i915, vgem, e->exec_id, e->flags);
		}
	}

	for (e = intel_execution_engines; e->name; e++) {
		igt_subtest_f("%swait-%s",
			      e->exec_id == 0 ? "basic-" : "",
			      e->name) {
			gem_require_ring(i915, e->exec_id | e->flags);
			igt_skip_on_f(gen == 6 &&
				      e->exec_id == I915_EXEC_BSD,
				      "MI_STORE_DATA broken on gen6 bsd\n");
			gem_quiescent_gpu(i915);
			test_wait(i915, vgem, e->exec_id, e->flags);
		}
	}

	/* Fence testing */
	igt_subtest_group {
		igt_fixture {
			igt_require(vgem_has_fences(vgem));
		}

		igt_subtest("basic-fence-read")
			test_fence_read(i915, vgem);
		igt_subtest("basic-fence-mmap")
			test_fence_mmap(i915, vgem);

		for (e = intel_execution_engines; e->name; e++) {
			igt_subtest_f("%sfence-wait-%s",
				      e->exec_id == 0 ? "basic-" : "",
				      e->name) {
				gem_require_ring(i915, e->exec_id | e->flags);
				igt_skip_on_f(gen == 6 &&
					      e->exec_id == I915_EXEC_BSD,
					      "MI_STORE_DATA broken on gen6 bsd\n");
				gem_quiescent_gpu(i915);
				test_fence_wait(i915, vgem, e->exec_id, e->flags);
			}
		}

		igt_subtest("basic-fence-flip")
			test_flip(i915, vgem, 0);

		/* Unsignalable-fence (hang recovery) tests, gated on the
		 * work-in-progress NOTIMEOUT flag being supported. */
		igt_subtest_group {
			igt_fixture {
				igt_require(vgem_fence_has_flag(vgem, WIP_VGEM_FENCE_NOTIMEOUT));
			}

			igt_subtest("fence-read-hang")
				test_fence_hang(i915, vgem, 0);
			igt_subtest("fence-write-hang")
				test_fence_hang(i915, vgem, VGEM_FENCE_WRITE);

			igt_subtest("fence-flip-hang")
				test_flip(i915, vgem, WIP_VGEM_FENCE_NOTIMEOUT);
		}
	}

	igt_fixture {
		close(i915);
		close(vgem);
	}
}