/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

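/*
 * The helpers below implement a simple windowed print buffer: the caller
 * asks for the dump starting at byte 'start', so output produced before
 * that position is skipped (__i915_error_seek) and only what fits in
 * 'buf' is kept (__i915_error_advance). 'err' latches the first failure
 * so that later printf calls become no-ops.
 */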
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{

	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

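/*
 * Note: while still seeking towards the requested start position, the
 * format string is measured first with a NULL/0 vsnprintf so that the
 * whole formatted length can be skipped without writing anything.
 */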
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

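/* One summary line per buffer object, decorated with the flag helpers above. */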
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_RINGS; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

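/*
 * Pretty-print the per-ring register snapshot collected by
 * i915_record_ring_state() at the time of the hang.
 */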
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, " START: 0x%08x\n", ring->start);
	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

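/*
 * Top-level formatter: turns the captured drm_i915_error_state into the
 * text blob exposed to userspace via the debugfs/sysfs "error" file.
 */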
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->ring[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

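/*
 * Allocate the print buffer, falling back to progressively smaller sizes
 * (count + 1 or PAGE_SIZE, then PAGE_SIZE, then 128 bytes) under memory
 * pressure.
 */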
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

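/*
 * Snapshot the contents of a GEM object into kernel memory (GFP_ATOMIC,
 * page by page). Where possible the copy is read back through the GGTT
 * aperture; otherwise the pages are clflushed and copied via kmap.
 */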
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		   vma && (vma->bound & GLOBAL_BIND) &&
		   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)

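/* Record a compact per-VMA summary used for the Active/Pinned buffer lists. */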
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_RINGS; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning; its
 * only purpose is to try to prevent false duplicate bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in the case of
	 * strictly a client bug. Use instdone to differentiate those cases.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (IS_GEN3(dev) || IS_GEN2(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}


static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->start = I915_READ_START(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		if (IS_GEN6(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
		else if (IS_GEN7(dev))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
		else if (INTEL_INFO(dev)->gen >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
	}
}


static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

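/*
 * For each ring, capture the register state plus copies of the active
 * batch, ring buffer, HW status page and context image, and the list of
 * outstanding requests.
 */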
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_ringbuffer *rbuf;

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							     ring->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		if (i915.enable_execlists) {
			/* TODO: This is only a small fix to keep basic error
			 * capture working, but we need to add more information
			 * for it to be useful (e.g. dump the context being
			 * executed).
			 */
			if (request)
				rbuf = request->ctx->engine[ring->id].ringbuf;
			else
				rbuf = ring->default_context->engine[ring->id].ringbuf;
		} else
			rbuf = ring->buffer;

		error->ring[i].cpu_ring_head = rbuf->head;
		error->ring[i].cpu_ring_tail = rbuf->tail;

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, rbuf->obj);

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			if (count >= error->ring[i].num_requests) {
				/*
				 * If the ring request list was changed in
				 * between the point where the error request
				 * list was created and dimensioned and this
				 * point then just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed state during error
				 * state capture and that the error state is
				 * slightly incorrect as a consequence since we
				 * are typically only interested in the request
				 * list state at the point of error state
				 * capture, not in any changes happening during
				 * the capture.
				 */
				break;
			}

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}

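/* Build the one-line "GPU HANG: ecode ..." summary that is logged via DRM_INFO. */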
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_INFO(dev)->gen, ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);

}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev) || IS_GEN3(dev))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}