/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Testcase: Test that only specific ioctl report a wedged GPU.
 *
 */
29
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/ioctl.h>

#include <drm.h>

#include "igt.h"
#include "igt_sysfs.h"
#include "sw_sync.h"

IGT_TEST_DESCRIPTION("Test that specific ioctls report a wedged GPU (EIO).");

47static bool i915_reset_control(bool enable)
48{
49 const char *path = "/sys/module/i915/parameters/reset";
50 int fd, ret;
51
52 igt_debug("%s GPU reset\n", enable ? "Enabling" : "Disabling");
53
54 fd = open(path, O_RDWR);
55 igt_require(fd >= 0);
56
Chris Wilson6b2dddd2017-07-25 17:11:16 +010057 ret = write(fd, &"01"[enable], 1) == 1;
Chris Wilsoneb7d60e2015-06-17 18:29:49 +010058 close(fd);
59
60 return ret;
61}
62
/* Manually reset the GPU, then confirm it is alive again afterwards. */
static void trigger_reset(int fd)
{
	igt_force_gpu_reset(fd);

	/* And just check the gpu is indeed running again */
	igt_debug("Checking that the GPU recovered\n");
	gem_quiescent_gpu(fd);
}
71
72static void wedge_gpu(int fd)
73{
74 /* First idle the GPU then disable GPU resets before injecting a hang */
75 gem_quiescent_gpu(fd);
76
77 igt_require(i915_reset_control(false));
78
79 igt_debug("Wedging GPU by injecting hang\n");
80 igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
81
82 igt_assert(i915_reset_control(true));
83}
84
85static int __gem_throttle(int fd)
86{
87 int err = 0;
88 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
89 err = -errno;
90 return err;
91}
92
/* A wedged GPU must fail the throttle ioctl with -EIO. */
static void test_throttle(int fd)
{
	wedge_gpu(fd);

	igt_assert_eq(__gem_throttle(fd), -EIO);

	trigger_reset(fd);
}
101
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100102static void test_execbuf(int fd)
103{
104 struct drm_i915_gem_execbuffer2 execbuf;
105 struct drm_i915_gem_exec_object2 exec;
106 uint32_t tmp[] = { MI_BATCH_BUFFER_END };
107
108 memset(&exec, 0, sizeof(exec));
109 memset(&execbuf, 0, sizeof(execbuf));
110
111 exec.handle = gem_create(fd, 4096);
112 gem_write(fd, exec.handle, 0, tmp, sizeof(tmp));
113
Chris Wilson4de67b22017-01-02 11:05:21 +0000114 execbuf.buffers_ptr = to_user_pointer(&exec);
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100115 execbuf.buffer_count = 1;
116
117 wedge_gpu(fd);
118
119 igt_assert_eq(__gem_execbuf(fd, &execbuf), -EIO);
120 gem_close(fd, exec.handle);
121
122 trigger_reset(fd);
123}
124
Chris Wilson32c89882015-07-15 16:18:10 +0100125static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
126{
127 struct drm_i915_gem_wait wait;
128 int err = 0;
129
130 memset(&wait, 0, sizeof(wait));
131 wait.bo_handle = handle;
132 wait.timeout_ns = timeout;
133 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
134 err = -errno;
135
136 return err;
137}
138
139static void test_wait(int fd)
140{
Chris Wilson0a1fc452016-09-13 11:13:14 +0100141 igt_hang_t hang;
Chris Wilson32c89882015-07-15 16:18:10 +0100142
Daniel Vetter40798ef2015-12-16 13:13:58 +0000143 /* If the request we wait on completes due to a hang (even for
144 * that request), the user expects the return value to 0 (success).
145 */
Chris Wilson32c89882015-07-15 16:18:10 +0100146 hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
Daniel Vetter40798ef2015-12-16 13:13:58 +0000147 igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
Chris Wilson32c89882015-07-15 16:18:10 +0100148 igt_post_hang_ring(fd, hang);
149
Daniel Vetter40798ef2015-12-16 13:13:58 +0000150 /* If the GPU is wedged during the wait, again we expect the return
151 * value to be 0 (success).
152 */
153 igt_require(i915_reset_control(false));
154 hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
155 igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
156 igt_post_hang_ring(fd, hang);
157 igt_require(i915_reset_control(true));
158
Chris Wilson32c89882015-07-15 16:18:10 +0100159 trigger_reset(fd);
160}
161
Chris Wilson4b0e0bf2017-09-11 16:41:13 +0100162static void test_inflight(int fd)
163{
164 struct drm_i915_gem_execbuffer2 execbuf;
165 struct drm_i915_gem_exec_object2 obj[2];
166 uint32_t bbe = MI_BATCH_BUFFER_END;
167 unsigned int engine;
168 int fence[64]; /* conservative estimate of ring size */
169
170 igt_require(gem_has_exec_fence(fd));
171
172 memset(obj, 0, sizeof(obj));
173 obj[0].flags = EXEC_OBJECT_WRITE;
174 obj[1].handle = gem_create(fd, 4096);
175 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
176
177 for_each_engine(fd, engine) {
178 igt_hang_t hang;
179
180 igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
181 igt_require(i915_reset_control(false));
182
183 hang = igt_hang_ring(fd, engine);
184 obj[0].handle = hang.handle;
185
186 memset(&execbuf, 0, sizeof(execbuf));
187 execbuf.buffers_ptr = to_user_pointer(obj);
188 execbuf.buffer_count = 2;
189 execbuf.flags = engine | I915_EXEC_FENCE_OUT;
190
191 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
192 gem_execbuf_wr(fd, &execbuf);
193 fence[n] = execbuf.rsvd2 >> 32;
194 igt_assert(fence[n] != -1);
195 }
196
197 igt_post_hang_ring(fd, hang);
198
199 igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
200 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
201 igt_assert_eq(sync_fence_status(fence[n]), -EIO);
202 close(fence[n]);
203 }
204
205 igt_assert(i915_reset_control(true));
206 trigger_reset(fd);
207 }
208}
209
Chris Wilson58616272017-10-06 12:11:21 +0100210static void test_inflight_suspend(int fd)
211{
212 struct drm_i915_gem_execbuffer2 execbuf;
213 struct drm_i915_gem_exec_object2 obj[2];
214 uint32_t bbe = MI_BATCH_BUFFER_END;
215 int fence[64]; /* conservative estimate of ring size */
216 igt_hang_t hang;
217
218 igt_require(gem_has_exec_fence(fd));
219 igt_require(i915_reset_control(false));
220
221 memset(obj, 0, sizeof(obj));
222 obj[0].flags = EXEC_OBJECT_WRITE;
223 obj[1].handle = gem_create(fd, 4096);
224 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
225
226 hang = igt_hang_ring(fd, 0);
227 obj[0].handle = hang.handle;
228
229 memset(&execbuf, 0, sizeof(execbuf));
230 execbuf.buffers_ptr = to_user_pointer(obj);
231 execbuf.buffer_count = 2;
232 execbuf.flags = I915_EXEC_FENCE_OUT;
233
234 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
235 gem_execbuf_wr(fd, &execbuf);
236 fence[n] = execbuf.rsvd2 >> 32;
237 igt_assert(fence[n] != -1);
238 }
239
240 igt_system_suspend_autoresume(SUSPEND_STATE_MEM,
241 SUSPEND_TEST_DEVICES);
242
243 igt_post_hang_ring(fd, hang);
244
245 igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
246 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
247 igt_assert_eq(sync_fence_status(fence[n]), -EIO);
248 close(fence[n]);
249 }
250
251 igt_assert(i915_reset_control(true));
252 trigger_reset(fd);
253}
254
Chris Wilsonda197b52017-09-15 17:05:43 +0100255static uint32_t __gem_context_create(int fd)
256{
257 struct drm_i915_gem_context_create create;
258
259 memset(&create, 0, sizeof(create));
260 if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
261 return 0;
262
263 return create.ctx_id;
264}
265
266static void test_inflight_contexts(int fd)
267{
268 struct drm_i915_gem_execbuffer2 execbuf;
269 struct drm_i915_gem_exec_object2 obj[2];
270 uint32_t bbe = MI_BATCH_BUFFER_END;
271 unsigned int engine;
272 uint32_t ctx[64];
273 int fence[64];
274
275 igt_require(gem_has_exec_fence(fd));
276
277 ctx[0] = __gem_context_create(fd);
278 igt_require(ctx[0]);
279 for (unsigned int n = 1; n < ARRAY_SIZE(ctx); n++)
280 ctx[n] = gem_context_create(fd);
281
282 memset(obj, 0, sizeof(obj));
283 obj[0].flags = EXEC_OBJECT_WRITE;
284 obj[1].handle = gem_create(fd, 4096);
285 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
286
287 for_each_engine(fd, engine) {
288 igt_hang_t hang;
289
290 igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
291 igt_require(i915_reset_control(false));
292
293 hang = igt_hang_ring(fd, engine);
294 obj[0].handle = hang.handle;
295
296 memset(&execbuf, 0, sizeof(execbuf));
297 execbuf.buffers_ptr = to_user_pointer(obj);
298 execbuf.buffer_count = 2;
299 execbuf.flags = engine | I915_EXEC_FENCE_OUT;
300
301 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
302 execbuf.rsvd1 = ctx[n];
303 gem_execbuf_wr(fd, &execbuf);
304 fence[n] = execbuf.rsvd2 >> 32;
305 igt_assert(fence[n] != -1);
306 }
307
308 igt_post_hang_ring(fd, hang);
309
310 igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
311 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
312 igt_assert_eq(sync_fence_status(fence[n]), -EIO);
313 close(fence[n]);
314 }
315
316 igt_assert(i915_reset_control(true));
317 trigger_reset(fd);
318 }
319
320 for (unsigned int n = 0; n < ARRAY_SIZE(ctx); n++)
321 gem_context_destroy(fd, ctx[n]);
322}
323
Chris Wilson92e457d2017-09-08 11:33:15 +0100324static void test_inflight_external(int fd)
Chris Wilson9bbf6422016-11-18 08:50:33 +0000325{
326 struct drm_i915_gem_execbuffer2 execbuf;
Chris Wilson92e457d2017-09-08 11:33:15 +0100327 struct drm_i915_gem_exec_object2 obj;
Chris Wilson9bbf6422016-11-18 08:50:33 +0000328 uint32_t bbe = MI_BATCH_BUFFER_END;
329 igt_hang_t hang;
Chris Wilson92e457d2017-09-08 11:33:15 +0100330 int timeline, fence;
331
332 igt_require_sw_sync();
333 igt_require(gem_has_exec_fence(fd));
334
335 timeline = sw_sync_timeline_create();
336 fence = sw_sync_timeline_create_fence(timeline, 1);
Chris Wilson9bbf6422016-11-18 08:50:33 +0000337
338 igt_require(i915_reset_control(false));
339 hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
340
Chris Wilson92e457d2017-09-08 11:33:15 +0100341 memset(&obj, 0, sizeof(obj));
342 obj.handle = gem_create(fd, 4096);
343 gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
Chris Wilson9bbf6422016-11-18 08:50:33 +0000344
345 memset(&execbuf, 0, sizeof(execbuf));
Chris Wilson92e457d2017-09-08 11:33:15 +0100346 execbuf.buffers_ptr = to_user_pointer(&obj);
347 execbuf.buffer_count = 1;
348 execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
349 execbuf.rsvd2 = (uint32_t)fence;
Chris Wilson9bbf6422016-11-18 08:50:33 +0000350
Chris Wilson92e457d2017-09-08 11:33:15 +0100351 gem_execbuf_wr(fd, &execbuf);
352 close(fence);
Chris Wilson9bbf6422016-11-18 08:50:33 +0000353
Chris Wilson92e457d2017-09-08 11:33:15 +0100354 fence = execbuf.rsvd2 >> 32;
355 igt_assert(fence != -1);
Chris Wilson9bbf6422016-11-18 08:50:33 +0000356
Chris Wilson92e457d2017-09-08 11:33:15 +0100357 igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
358 sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
359
360 igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
361 igt_assert_eq(sync_fence_status(fence), -EIO);
362 close(fence);
363
364 igt_assert(i915_reset_control(true));
Chris Wilson9bbf6422016-11-18 08:50:33 +0000365 trigger_reset(fd);
Chris Wilson92e457d2017-09-08 11:33:15 +0100366 close(timeline);
Chris Wilson9bbf6422016-11-18 08:50:33 +0000367}
368
Chris Wilson639d6402017-09-08 13:45:21 +0100369static void test_inflight_internal(int fd)
370{
371 struct drm_i915_gem_execbuffer2 execbuf;
372 struct drm_i915_gem_exec_object2 obj[2];
373 uint32_t bbe = MI_BATCH_BUFFER_END;
374 unsigned engine, nfence = 0;
375 int fences[16];
376 igt_hang_t hang;
377
378 igt_require(gem_has_exec_fence(fd));
379
380 igt_require(i915_reset_control(false));
381 hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
382
383 memset(obj, 0, sizeof(obj));
384 obj[0].handle = hang.handle;
385 obj[0].flags = EXEC_OBJECT_WRITE;
386 obj[1].handle = gem_create(fd, 4096);
387 gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
388
389 memset(&execbuf, 0, sizeof(execbuf));
390 execbuf.buffers_ptr = to_user_pointer(obj);
391 execbuf.buffer_count = 2;
392 for_each_engine(fd, engine) {
393 execbuf.flags = engine | I915_EXEC_FENCE_OUT;
394
395 gem_execbuf_wr(fd, &execbuf);
396
397 fences[nfence] = execbuf.rsvd2 >> 32;
398 igt_assert(fences[nfence] != -1);
399 nfence++;
400 }
401
402 igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
403
404 igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
405 while (nfence--) {
406 igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
407 close(fences[nfence]);
408 }
409
410 igt_assert(i915_reset_control(true));
411 trigger_reset(fd);
412}
413
Chris Wilson4f082c32017-09-08 13:48:05 +0100414static int fd = -1;
415
416static void
417exit_handler(int sig)
418{
419 i915_reset_control(true);
420 igt_force_gpu_reset(fd);
421}
422
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100423igt_main
424{
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100425 igt_skip_on_simulation();
426
427 igt_fixture {
Micah Fedkec81d2932015-07-22 21:54:02 +0000428 fd = drm_open_driver(DRIVER_INTEL);
Chris Wilson4f082c32017-09-08 13:48:05 +0100429
430 igt_require(i915_reset_control(true));
431 igt_force_gpu_reset(fd);
432 igt_install_exit_handler(exit_handler);
433
Michał Winiarskif6dfe552017-10-16 11:05:14 +0200434 gem_show_submission_method(fd);
Chris Wilson28a2f142017-03-08 12:22:13 +0000435 igt_require_gem(fd);
Chris Wilson92caf132015-12-16 09:23:56 +0000436 igt_require_hang_ring(fd, I915_EXEC_DEFAULT);
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100437 }
438
439 igt_subtest("throttle")
440 test_throttle(fd);
441
442 igt_subtest("execbuf")
443 test_execbuf(fd);
444
Chris Wilson32c89882015-07-15 16:18:10 +0100445 igt_subtest("wait")
446 test_wait(fd);
447
Chris Wilson4b0e0bf2017-09-11 16:41:13 +0100448 igt_subtest("in-flight")
449 test_inflight(fd);
450
Chris Wilsonda197b52017-09-15 17:05:43 +0100451 igt_subtest("in-flight-contexts")
452 test_inflight_contexts(fd);
453
Chris Wilson92e457d2017-09-08 11:33:15 +0100454 igt_subtest("in-flight-external")
455 test_inflight_external(fd);
Chris Wilson639d6402017-09-08 13:45:21 +0100456
457 igt_subtest("in-flight-internal") {
Michał Winiarskif6dfe552017-10-16 11:05:14 +0200458 igt_skip_on(gem_has_semaphores(fd));
Chris Wilson639d6402017-09-08 13:45:21 +0100459 test_inflight_internal(fd);
460 }
Chris Wilson58616272017-10-06 12:11:21 +0100461
462 igt_subtest("in-flight-suspend")
463 test_inflight_suspend(fd);
Chris Wilsoneb7d60e2015-06-17 18:29:49 +0100464}