/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{

	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

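/*
 * The helpers above implement a windowed view of the error state: userspace
 * reads the dump at an arbitrary offset (e->start), so each printf is first
 * measured, skipped entirely while it still lies before the window, and only
 * then written into the buffer, with __i915_error_advance() trimming the
 * first partially visible string into place.
 */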
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

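/*
 * Each buffer object is dumped as a single line: GGTT offset, size,
 * read/write domains, the last read seqno per engine and the last write
 * seqno, followed by flag suffixes (pinned, tiling, dirty, purgeable,
 * userptr, last ring and cache level) and, when present, its flink name and
 * fence register.
 */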
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, " START: 0x%08x\n", ring->start);
	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

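/*
 * Dump a captured object page by page, one 32-bit word per line prefixed
 * with its running byte offset into the object.
 */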
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if (error->ring[i].num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_waiters);
			for (j = 0; j < error->ring[i].num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   error->ring[i].waiters[j].seqno,
					   error->ring[i].waiters[j].comm,
					   error->ring[i].waiters[j].pid);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = error->ring[i].wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

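/*
 * Allocation is best effort: try to fit the whole dump (count + 1 bytes, or
 * at least a page) without triggering reclaim, then fall back to a single
 * page and finally to a tiny 128 byte buffer before giving up.
 */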
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		i915_error_object_free(error->ring[i].wa_ctx);
		kfree(error->ring[i].requests);
		kfree(error->ring[i].waiters);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

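/*
 * Copy the pages of @src into a freshly allocated snapshot. Where possible
 * the copy is made through the GGTT aperture (uncached and stolen objects),
 * otherwise the pages are clflushed and copied via a CPU mapping; either way
 * the copy runs with interrupts disabled and uses GFP_ATOMIC allocations, as
 * error capture can run in atomic context.
 */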
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)

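/*
 * Record a snapshot of a single VMA's bookkeeping (size, domains, last
 * read/write seqnos and flags) for the buffer lists printed above.
 */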
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicate bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in the case of
	 * strictly a client bug. Use instdone to differentiate some of those.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}

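/*
 * Fence registers moved and changed width across generations, so read them
 * back with the layout matching the hardware we are running on.
 */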
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}

static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!i915_semaphore_is_enabled(dev_priv))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(engine, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}

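/*
 * Capture the tasks currently waiting on this engine's breadcrumbs: count
 * the waiters under the lock, allocate the array outside it (GFP_ATOMIC, so
 * it may fail silently), then re-walk the rb-tree and copy at most that many
 * entries.
 */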
static void engine_record_waiters(struct intel_engine_cs *engine,
				  struct drm_i915_error_ring *ering)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ering->num_waiters = 0;
	ering->waiters = NULL;

	spin_lock(&b->lock);
	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	ering->waiters = waiter;

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ering->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}

static void i915_record_ring_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *engine,
				   struct drm_i915_error_ring *ering)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(dev_priv, error, engine,
						    ering);
		else
			gen6_record_semaphore_state(dev_priv, engine, ering);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = intel_engine_has_waiter(engine);
	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ering->acthd = intel_ring_get_active_head(engine);
	ering->seqno = intel_engine_get_seqno(engine);
	ering->last_seqno = engine->last_submitted_seqno;
	ering->start = I915_READ_START(engine);
	ering->head = I915_READ_HEAD(engine);
	ering->tail = I915_READ_TAIL(engine);
	ering->ctl = I915_READ_CTL(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = engine->hangcheck.score;
	ering->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}


static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (engine->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

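/*
 * Per-engine capture: record the register state and waiters, then snapshot
 * the objects tied to the active request (batch buffer, ring buffer, HWS
 * page, w/a context and HW context) and finally the list of outstanding
 * requests. The request list can change under us, hence the defensive bound
 * check while filling the request array.
 */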
Chris Wilsonc0336662016-05-06 15:40:21 +01001071static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
Mika Kuoppala84734a02013-07-12 16:50:57 +03001072 struct drm_i915_error_state *error)
1073{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001074 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001075 struct drm_i915_gem_request *request;
1076 int i, count;
1077
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001078 for (i = 0; i < I915_NUM_ENGINES; i++) {
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001079 struct intel_engine_cs *engine = &dev_priv->engine[i];
Chris Wilson372fbb82014-01-27 13:52:34 +00001080
Chris Wilsoneee73b42014-06-10 12:09:29 +01001081 error->ring[i].pid = -1;
1082
Chris Wilsonc0336662016-05-06 15:40:21 +01001083 if (!intel_engine_initialized(engine))
Chris Wilson372fbb82014-01-27 13:52:34 +00001084 continue;
1085
1086 error->ring[i].valid = true;
1087
Chris Wilsonc0336662016-05-06 15:40:21 +01001088 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
Chris Wilson688e6c72016-07-01 17:23:15 +01001089 engine_record_waiters(engine, &error->ring[i]);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001090
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001091 request = i915_gem_find_active_request(engine);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001092 if (request) {
Daniel Vetterae6c4802014-08-06 15:04:53 +02001093 struct i915_address_space *vm;
Chris Wilsonba6e0412016-07-04 08:08:38 +01001094 struct intel_ringbuffer *rb;
Daniel Vetterae6c4802014-08-06 15:04:53 +02001095
1096 vm = request->ctx && request->ctx->ppgtt ?
1097 &request->ctx->ppgtt->base :
Joonas Lahtinen72e96d62016-03-30 16:57:10 +03001098 &ggtt->base;
Daniel Vetterae6c4802014-08-06 15:04:53 +02001099
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001100 /* We need to copy these to an anonymous buffer
1101 * as the simplest method to avoid being overwritten
1102 * by userspace.
1103 */
1104 error->ring[i].batchbuffer =
1105 i915_error_object_create(dev_priv,
1106 request->batch_obj,
Daniel Vetterae6c4802014-08-06 15:04:53 +02001107 vm);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001108
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001109 if (HAS_BROKEN_CS_TLB(dev_priv))
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001110 error->ring[i].wa_batchbuffer =
1111 i915_error_ggtt_object_create(dev_priv,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001112 engine->scratch.obj);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001113
Mika Kuoppala071c92d2015-02-12 10:26:02 +02001114 if (request->pid) {
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001115 struct task_struct *task;
1116
1117 rcu_read_lock();
Mika Kuoppala071c92d2015-02-12 10:26:02 +02001118 task = pid_task(request->pid, PIDTYPE_PID);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001119 if (task) {
1120 strcpy(error->ring[i].comm, task->comm);
1121 error->ring[i].pid = task->pid;
1122 }
1123 rcu_read_unlock();
1124 }
Chris Wilsonba6e0412016-07-04 08:08:38 +01001125
1126 rb = request->ringbuf;
1127 error->ring[i].cpu_ring_head = rb->head;
1128 error->ring[i].cpu_ring_tail = rb->tail;
1129 error->ring[i].ringbuffer =
1130 i915_error_ggtt_object_create(dev_priv,
1131 rb->obj);
Chris Wilsonab0e7ff2014-02-25 17:11:24 +02001132 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03001133
Chris Wilson8ae62dc2014-08-12 20:05:49 +01001134 error->ring[i].hws_page =
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001135 i915_error_ggtt_object_create(dev_priv,
1136 engine->status_page.obj);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001137
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001138 if (engine->wa_ctx.obj) {
arun.siluvery@linux.intel.comf85db052016-03-01 11:24:36 +00001139 error->ring[i].wa_ctx =
1140 i915_error_ggtt_object_create(dev_priv,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001141 engine->wa_ctx.obj);
arun.siluvery@linux.intel.comf85db052016-03-01 11:24:36 +00001142 }
1143
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001144 i915_gem_record_active_context(engine, error, &error->ring[i]);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001145
1146 count = 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001147 list_for_each_entry(request, &engine->request_list, list)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001148 count++;
1149
1150 error->ring[i].num_requests = count;
1151 error->ring[i].requests =
Daniel Vettera1e22652013-09-21 00:35:38 +02001152 kcalloc(count, sizeof(*error->ring[i].requests),
Mika Kuoppala84734a02013-07-12 16:50:57 +03001153 GFP_ATOMIC);
1154 if (error->ring[i].requests == NULL) {
1155 error->ring[i].num_requests = 0;
1156 continue;
1157 }
1158
1159 count = 0;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001160 list_for_each_entry(request, &engine->request_list, list) {
Mika Kuoppala84734a02013-07-12 16:50:57 +03001161 struct drm_i915_error_request *erq;
1162
Tomas Elf9c8e1bd2015-10-19 17:51:57 +01001163 if (count >= error->ring[i].num_requests) {
1164 /*
1165 * If the ring request list was changed in
1166 * between the point where the error request
1167 * list was created and dimensioned and this
1168 * point then just exit early to avoid crashes.
1169 *
1170 * We don't need to communicate that the
1171 * request list changed state during error
1172 * state capture and that the error state is
1173 * slightly incorrect as a consequence since we
1174 * are typically only interested in the request
1175 * list state at the point of error state
1176 * capture, not in any changes happening during
1177 * the capture.
1178 */
1179 break;
1180 }
1181
Mika Kuoppala84734a02013-07-12 16:50:57 +03001182 erq = &error->ring[i].requests[count++];
1183 erq->seqno = request->seqno;
1184 erq->jiffies = request->emitted_jiffies;
Nick Hoath72f95af2015-01-15 13:10:37 +00001185 erq->tail = request->postfix;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001186 }
1187 }
1188}
1189
Ben Widawsky95f53012013-07-31 17:00:15 -07001190/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
1191 * VM.
1192 */
1193static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1194 struct drm_i915_error_state *error,
1195 struct i915_address_space *vm,
1196 const int ndx)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001197{
Ben Widawsky95f53012013-07-31 17:00:15 -07001198 struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001199 struct drm_i915_gem_object *obj;
Ben Widawsky95f53012013-07-31 17:00:15 -07001200 struct i915_vma *vma;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001201 int i;
1202
1203 i = 0;
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00001204 list_for_each_entry(vma, &vm->active_list, vm_link)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001205 i++;
Ben Widawsky95f53012013-07-31 17:00:15 -07001206 error->active_bo_count[ndx] = i;
Chris Wilson3a448732014-08-12 20:05:47 +01001207
1208 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
Chris Wilson1c7f4bc2016-02-26 11:03:19 +00001209 list_for_each_entry(vma, &obj->vma_list, obj_link)
Tvrtko Ursulinfe14d5f2014-12-10 17:27:58 +00001210 if (vma->vm == vm && vma->pin_count > 0)
Chris Wilson3a448732014-08-12 20:05:47 +01001211 i++;
Chris Wilson3a448732014-08-12 20:05:47 +01001212 }
Ben Widawsky95f53012013-07-31 17:00:15 -07001213 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
Mika Kuoppala84734a02013-07-12 16:50:57 +03001214
1215 if (i) {
Daniel Vettera1e22652013-09-21 00:35:38 +02001216 active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
Ben Widawsky95f53012013-07-31 17:00:15 -07001217 if (active_bo)
1218 pinned_bo = active_bo + error->active_bo_count[ndx];
Mika Kuoppala84734a02013-07-12 16:50:57 +03001219 }
1220
Ben Widawsky95f53012013-07-31 17:00:15 -07001221 if (active_bo)
1222 error->active_bo_count[ndx] =
1223 capture_active_bo(active_bo,
1224 error->active_bo_count[ndx],
Ben Widawsky5cef07e2013-07-16 16:50:08 -07001225 &vm->active_list);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001226
Ben Widawsky95f53012013-07-31 17:00:15 -07001227 if (pinned_bo)
1228 error->pinned_bo_count[ndx] =
1229 capture_pinned_bo(pinned_bo,
1230 error->pinned_bo_count[ndx],
Chris Wilson3a448732014-08-12 20:05:47 +01001231 &dev_priv->mm.bound_list, vm);
Ben Widawsky95f53012013-07-31 17:00:15 -07001232 error->active_bo[ndx] = active_bo;
1233 error->pinned_bo[ndx] = pinned_bo;
1234}
1235
1236static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
1237 struct drm_i915_error_state *error)
1238{
1239 struct i915_address_space *vm;
1240 int cnt = 0, i = 0;
1241
1242 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
1243 cnt++;
1244
Ben Widawsky95f53012013-07-31 17:00:15 -07001245 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
1246 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
1247 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
1248 GFP_ATOMIC);
1249 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
1250 GFP_ATOMIC);
1251
Chris Wilson3a448732014-08-12 20:05:47 +01001252 if (error->active_bo == NULL ||
1253 error->pinned_bo == NULL ||
1254 error->active_bo_count == NULL ||
1255 error->pinned_bo_count == NULL) {
1256 kfree(error->active_bo);
1257 kfree(error->active_bo_count);
1258 kfree(error->pinned_bo);
1259 kfree(error->pinned_bo_count);
1260
1261 error->active_bo = NULL;
1262 error->active_bo_count = NULL;
1263 error->pinned_bo = NULL;
1264 error->pinned_bo_count = NULL;
1265 } else {
1266 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
1267 i915_gem_capture_vm(dev_priv, error, vm, i++);
1268
1269 error->vm_count = cnt;
1270 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03001271}
1272
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001273/* Capture all registers which don't fit into another category. */
1274static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1275 struct drm_i915_error_state *error)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001276{
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001277 struct drm_device *dev = dev_priv->dev;
Rodrigo Vivi885ea5a2014-08-05 10:07:13 -07001278 int i;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001279
Ben Widawsky654c90c2014-01-30 00:19:36 -08001280 /* General organization
1281 * 1. Registers specific to a single generation
1282 * 2. Registers which belong to multiple generations
1283 * 3. Feature specific registers.
1284 * 4. Everything else
 1285	 * Please try to follow the order. (An illustrative sketch follows this function.)
1286 */
1287
1288 /* 1: Registers specific to a single generation */
1289 if (IS_VALLEYVIEW(dev)) {
Rodrigo Vivi885ea5a2014-08-05 10:07:13 -07001290 error->gtier[0] = I915_READ(GTIER);
Rodrigo Vivi843db712014-08-01 09:12:27 -07001291 error->ier = I915_READ(VLV_IER);
Ville Syrjälä40181692015-10-22 15:34:57 +03001292 error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
Ben Widawsky654c90c2014-01-30 00:19:36 -08001293 }
1294
1295 if (IS_GEN7(dev))
1296 error->err_int = I915_READ(GEN7_ERR_INT);
1297
Mika Kuoppala6c826f32015-03-24 14:54:19 +02001298 if (INTEL_INFO(dev)->gen >= 8) {
1299 error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
1300 error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1301 }
1302
Ben Widawsky91ec5d12014-01-30 00:19:39 -08001303 if (IS_GEN6(dev)) {
Ville Syrjälä40181692015-10-22 15:34:57 +03001304 error->forcewake = I915_READ_FW(FORCEWAKE);
Ben Widawsky91ec5d12014-01-30 00:19:39 -08001305 error->gab_ctl = I915_READ(GAB_CTL);
1306 error->gfx_mode = I915_READ(GFX_MODE);
1307 }
Ben Widawsky654c90c2014-01-30 00:19:36 -08001308
Ben Widawsky654c90c2014-01-30 00:19:36 -08001309 /* 2: Registers which belong to multiple generations */
1310 if (INTEL_INFO(dev)->gen >= 7)
Ville Syrjälä40181692015-10-22 15:34:57 +03001311 error->forcewake = I915_READ_FW(FORCEWAKE_MT);
Ben Widawsky654c90c2014-01-30 00:19:36 -08001312
1313 if (INTEL_INFO(dev)->gen >= 6) {
1314 error->derrmr = I915_READ(DERRMR);
1315 error->error = I915_READ(ERROR_GEN6);
1316 error->done_reg = I915_READ(DONE_REG);
1317 }
1318
1319 /* 3: Feature specific registers */
Ben Widawsky91ec5d12014-01-30 00:19:39 -08001320 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1321 error->gam_ecochk = I915_READ(GAM_ECOCHK);
1322 error->gac_eco = I915_READ(GAC_ECO_BITS);
1323 }
1324
1325 /* 4: Everything else */
Mika Kuoppala84734a02013-07-12 16:50:57 +03001326 if (HAS_HW_CONTEXTS(dev))
1327 error->ccid = I915_READ(CCID);
1328
Rodrigo Vivi885ea5a2014-08-05 10:07:13 -07001329 if (INTEL_INFO(dev)->gen >= 8) {
1330 error->ier = I915_READ(GEN8_DE_MISC_IER);
1331 for (i = 0; i < 4; i++)
1332 error->gtier[i] = I915_READ(GEN8_GT_IER(i));
1333 } else if (HAS_PCH_SPLIT(dev)) {
Rodrigo Vivi843db712014-08-01 09:12:27 -07001334 error->ier = I915_READ(DEIER);
Rodrigo Vivi885ea5a2014-08-05 10:07:13 -07001335 error->gtier[0] = I915_READ(GTIER);
Rodrigo Vivi843db712014-08-01 09:12:27 -07001336 } else if (IS_GEN2(dev)) {
1337 error->ier = I915_READ16(IER);
1338 } else if (!IS_VALLEYVIEW(dev)) {
1339 error->ier = I915_READ(IER);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001340 }
Ben Widawsky654c90c2014-01-30 00:19:36 -08001341 error->eir = I915_READ(EIR);
1342 error->pgtbl_er = I915_READ(PGTBL_ER);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001343
Chris Wilsonc0336662016-05-06 15:40:21 +01001344 i915_get_extra_instdone(dev_priv, error->extra_instdone);
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001345}
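/*
 * Illustrative sketch, not driver code: following the ordering convention
 * documented at the top of i915_capture_reg_state(), a register that exists
 * on only one generation would be read in section 1, before the
 * cross-generation and feature-specific reads.  FAKE_GEN9_REG and
 * error->fake_reg are made-up names used purely to show the placement:
 *
 *	if (IS_GEN9(dev))
 *		error->fake_reg = I915_READ(FAKE_GEN9_REG);
 *
 * A real addition would use an existing register definition and a matching
 * field in struct drm_i915_error_state.
 */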
Mika Kuoppala84734a02013-07-12 16:50:57 +03001346
Chris Wilsonc0336662016-05-06 15:40:21 +01001347static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
Mika Kuoppala58174462014-02-25 17:11:26 +02001348 struct drm_i915_error_state *error,
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00001349 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02001350 const char *error_msg)
Mika Kuoppalacb383002014-02-25 17:11:25 +02001351{
Mika Kuoppalacb383002014-02-25 17:11:25 +02001352 u32 ecode;
Mika Kuoppala58174462014-02-25 17:11:26 +02001353 int ring_id = -1, len;
Mika Kuoppalacb383002014-02-25 17:11:25 +02001354
1355 ecode = i915_error_generate_code(dev_priv, error, &ring_id);
1356
Mika Kuoppala58174462014-02-25 17:11:26 +02001357 len = scnprintf(error->error_msg, sizeof(error->error_msg),
Mika Kuoppala0b5492d2014-11-06 13:03:46 +02001358 "GPU HANG: ecode %d:%d:0x%08x",
Chris Wilsonc0336662016-05-06 15:40:21 +01001359 INTEL_GEN(dev_priv), ring_id, ecode);
Mika Kuoppala58174462014-02-25 17:11:26 +02001360
1361 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1362 len += scnprintf(error->error_msg + len,
1363 sizeof(error->error_msg) - len,
1364 ", in %s [%d]",
1365 error->ring[ring_id].comm,
1366 error->ring[ring_id].pid);
1367
1368 scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
1369 ", reason: %s, action: %s",
1370 error_msg,
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00001371 engine_mask ? "reset" : "continue");
Mika Kuoppalacb383002014-02-25 17:11:25 +02001372}
1373
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001374static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1375 struct drm_i915_error_state *error)
1376{
Chris Wilsoneb5be9d2015-08-07 20:24:15 +01001377 error->iommu = -1;
1378#ifdef CONFIG_INTEL_IOMMU
1379 error->iommu = intel_iommu_gfx_mapped;
1380#endif
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001381 error->reset_count = i915_reset_count(&dev_priv->gpu_error);
Mika Kuoppala62d5d692014-02-25 17:11:28 +02001382 error->suspend_count = dev_priv->suspend_count;
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001383}
1384
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001385/**
1386 * i915_capture_error_state - capture an error record for later analysis
 1387	 * @dev_priv: i915 device private
	 * @engine_mask: mask of the engines that hung; zero means no reset will be attempted
	 * @error_msg: short description of the hang, included in the logged error message
1388 *
1389 * Should be called when an error is detected (either a hang or an error
1390 * interrupt) to capture error state from the time of the error. Fills
1391 * out a structure which becomes available in debugfs for user level tools
 1392	 * to pick up. A hedged userspace sketch of saving the resulting dump follows
	 * this function.
1393 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001394void i915_capture_error_state(struct drm_i915_private *dev_priv,
1395 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02001396 const char *error_msg)
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001397{
Chris Wilson53a4c6b2014-01-30 14:38:15 +00001398 static bool warned;
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001399 struct drm_i915_error_state *error;
1400 unsigned long flags;
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001401
1402 /* Account for pipe specific data like PIPE*STAT */
1403 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1404 if (!error) {
1405 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1406 return;
1407 }
1408
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001409 kref_init(&error->ref);
1410
Mika Kuoppala48b031e2014-02-25 17:11:27 +02001411 i915_capture_gen_state(dev_priv, error);
Ben Widawsky1d762aa2014-01-30 00:19:35 -08001412 i915_capture_reg_state(dev_priv, error);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001413 i915_gem_capture_buffers(dev_priv, error);
Chris Wilsonc0336662016-05-06 15:40:21 +01001414 i915_gem_record_fences(dev_priv, error);
1415 i915_gem_record_rings(dev_priv, error);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001416
1417 do_gettimeofday(&error->time);
1418
Chris Wilsonc0336662016-05-06 15:40:21 +01001419 error->overlay = intel_overlay_capture_error_state(dev_priv);
1420 error->display = intel_display_capture_error_state(dev_priv);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001421
Chris Wilsonc0336662016-05-06 15:40:21 +01001422 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
Mika Kuoppalacb383002014-02-25 17:11:25 +02001423 DRM_INFO("%s\n", error->error_msg);
1424
Mika Kuoppala84734a02013-07-12 16:50:57 +03001425 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1426 if (dev_priv->gpu_error.first_error == NULL) {
1427 dev_priv->gpu_error.first_error = error;
1428 error = NULL;
1429 }
1430 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1431
Mika Kuoppalacb383002014-02-25 17:11:25 +02001432 if (error) {
Mika Kuoppala84734a02013-07-12 16:50:57 +03001433 i915_error_state_free(&error->ref);
Mika Kuoppalacb383002014-02-25 17:11:25 +02001434 return;
1435 }
1436
1437 if (!warned) {
1438 DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1439 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1440 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1441 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
Chris Wilsonc0336662016-05-06 15:40:21 +01001442 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
Mika Kuoppalacb383002014-02-25 17:11:25 +02001443 warned = true;
1444 }
Mika Kuoppala84734a02013-07-12 16:50:57 +03001445}
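/*
 * Hedged userspace sketch, not part of the driver: the dump saved above is
 * exposed at the sysfs path printed by the DRM_INFO() message, so a minimal
 * tool to copy it out for a bug report could look roughly like the snippet
 * below.  The card index 0, the output filename and the buffer size are
 * arbitrary assumptions for illustration only:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		size_t n;
 *		FILE *in = fopen("/sys/class/drm/card0/error", "r");
 *		FILE *out = fopen("gpu-hang.txt", "w");
 *
 *		if (!in || !out)
 *			return 1;
 *		while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
 *			fwrite(buf, 1, n, out);
 *		fclose(in);
 *		fclose(out);
 *		return 0;
 *	}
 *
 * Attaching the resulting file to the bug report is exactly what the
 * DRM_INFO() warning text above asks reporters to do.
 */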
1446
1447void i915_error_state_get(struct drm_device *dev,
1448 struct i915_error_state_file_priv *error_priv)
1449{
1450 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001451
Daniel Vetter5b254c52014-09-15 14:55:24 +02001452 spin_lock_irq(&dev_priv->gpu_error.lock);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001453 error_priv->error = dev_priv->gpu_error.first_error;
1454 if (error_priv->error)
1455 kref_get(&error_priv->error->ref);
Daniel Vetter5b254c52014-09-15 14:55:24 +02001456 spin_unlock_irq(&dev_priv->gpu_error.lock);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001457
1458}
1459
1460void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
1461{
1462 if (error_priv->error)
1463 kref_put(&error_priv->error->ref, i915_error_state_free);
1464}
1465
1466void i915_destroy_error_state(struct drm_device *dev)
1467{
1468 struct drm_i915_private *dev_priv = dev->dev_private;
1469 struct drm_i915_error_state *error;
Mika Kuoppala84734a02013-07-12 16:50:57 +03001470
Daniel Vetter5b254c52014-09-15 14:55:24 +02001471 spin_lock_irq(&dev_priv->gpu_error.lock);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001472 error = dev_priv->gpu_error.first_error;
1473 dev_priv->gpu_error.first_error = NULL;
Daniel Vetter5b254c52014-09-15 14:55:24 +02001474 spin_unlock_irq(&dev_priv->gpu_error.lock);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001475
1476 if (error)
1477 kref_put(&error->ref, i915_error_state_free);
1478}
1479
Chris Wilson0a4cd7c2014-08-22 14:41:39 +01001480const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001481{
1482 switch (type) {
1483 case I915_CACHE_NONE: return " uncached";
Chris Wilson0a4cd7c2014-08-22 14:41:39 +01001484 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
Chris Wilson350ec882013-08-06 13:17:02 +01001485 case I915_CACHE_L3_LLC: return " L3+LLC";
Chris Wilsonf56383c2013-09-25 10:23:19 +01001486 case I915_CACHE_WT: return " WT";
Mika Kuoppala84734a02013-07-12 16:50:57 +03001487 default: return "";
1488 }
1489}
1490
 1491/* NB: please notice the memset below: only some of the I915_NUM_INSTDONE_REG
 * slots are filled in per generation, so the rest must read back as zero
 * (a hedged caller sketch follows this function). */
Chris Wilsonc0336662016-05-06 15:40:21 +01001492void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
1493 uint32_t *instdone)
Mika Kuoppala84734a02013-07-12 16:50:57 +03001494{
Mika Kuoppala84734a02013-07-12 16:50:57 +03001495 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1496
Chris Wilsonc0336662016-05-06 15:40:21 +01001497 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
Imre Deakbd93a502015-09-30 23:00:43 +03001498 instdone[0] = I915_READ(GEN2_INSTDONE);
Chris Wilsonc0336662016-05-06 15:40:21 +01001499 else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
Imre Deakf1d54342015-09-30 23:00:42 +03001500 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
Imre Deak13d70b82015-09-30 23:00:44 +03001501 instdone[1] = I915_READ(GEN4_INSTDONE1);
Chris Wilsonc0336662016-05-06 15:40:21 +01001502 } else if (INTEL_GEN(dev_priv) >= 7) {
Imre Deakf1d54342015-09-30 23:00:42 +03001503 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
Mika Kuoppala84734a02013-07-12 16:50:57 +03001504 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1505 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1506 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
Mika Kuoppala84734a02013-07-12 16:50:57 +03001507 }
1508}
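/*
 * Hedged caller sketch, not part of the driver: because of the memset noted
 * above, every slot of the array is defined after the call even on
 * generations that fill in only instdone[0], so a caller may dump the whole
 * thing.  The loop and debug message are illustrative only:
 *
 *	uint32_t instdone[I915_NUM_INSTDONE_REG];
 *	int i;
 *
 *	i915_get_extra_instdone(dev_priv, instdone);
 *	for (i = 0; i < I915_NUM_INSTDONE_REG; i++)
 *		DRM_DEBUG_DRIVER("INSTDONE[%d]: 0x%08x\n", i, instdone[i]);
 */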