/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */
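
/*
 * GPU error state capture: snapshot registers, rings and buffer objects at
 * hang time and format them into the crash dump that userspace picks up
 * from /sys/class/drm/cardN/error.
 */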
#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
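
/*
 * Error state output is produced in one pass but read back in arbitrary
 * chunks: 'start' is the offset of the current read window, 'pos' tracks
 * the position in the virtual output stream, and 'bytes' counts what has
 * actually landed in 'buf'. Output falling before 'start' is skipped
 * rather than stored.
 */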
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
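
/*
 * vsnprintf(NULL, 0, ...) is used as a measuring pass: when output still
 * lies before the read window we only need its length, so that
 * __i915_error_seek()/__i915_error_advance() can skip it or keep just the
 * tail that crosses into the window.
 */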
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
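
/*
 * Each buffer object is dumped on a single line: offset, size, domains and
 * seqnos, followed by the short flag suffixes built from pin_flag(),
 * tiling_flag(), dirty_flag(), purgeable_flag() and friends above.
 */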
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_ACTIVE_LOOP:
		return "active (loop)";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_ring *ring)
{
	if (!ring->valid)
		return;

	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		err_printf(m, "%s command stream:\n", ring_str(i));
		i915_ring_error_state(m, dev, &error->ring[i]);
	}

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->ring[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x\n",
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name, obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
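
/*
 * Each read() of the error state hands us (count, pos): size the buffer so
 * that any single printf fits in its entirety (at least a page), with
 * smaller fallback allocations under memory pressure, and remember pos as
 * the start of the output window.
 */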
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	i915_error_object_free(error->semaphore_obj);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
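
/*
 * Copy an object's pages into plain kernel memory, reading through the WC
 * GTT mapping, from stolen memory, or via the CPU pages (with clflushes
 * around the copy), whichever is available; done with interrupts off and
 * GFP_ATOMIC since this runs at error-capture time.
 */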
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       struct i915_address_space *vm,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (src->cache_level == I915_CACHE_NONE &&
		    reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping &&
		    i915_is_ggtt(vm)) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_object_create(dev_priv, src, vm) \
	i915_error_object_create_sized((dev_priv), (src), (vm), \
				       (src)->base.size>>PAGE_SHIFT)

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
				       (src)->base.size>>PAGE_SHIFT)

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0) {
				capture_bo(err++, vma);
				break;
			}
	}

	return err - first;
}

/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands which almost always appear even
	 * when the hang is strictly a client bug. Use instdone to
	 * differentiate those somewhat.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
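
/*
 * Fence registers vary in number and location across generations, hence
 * the per-gen switch below.
 */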
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
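		/* fall through - gen3 also has the 8 gen2 fence registers */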
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
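
/*
 * On gen8 all rings signal semaphores through a single shared page; pull
 * each mailbox value for this ring out of that page rather than from
 * per-ring MMIO registers as on gen6/7.
 */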
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	int i;

	if (!i915_semaphore_is_enabled(dev_priv->dev))
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_object_create(dev_priv,
						 dev_priv->semaphore_obj,
						 &dev_priv->gtt.base);

	for_each_ring(to, dev_priv, i) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (ring == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(ring, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
	}
}

static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
	ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv->dev)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
	}
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *ring,
				   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
		ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (INTEL_INFO(dev)->gen >= 8)
			gen8_record_semaphore_state(dev_priv, error, ring, ering);
		else
			gen6_record_semaphore_state(dev_priv, ring, ering);
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(INSTDONE);
	}

	ering->waiting = waitqueue_active(&ring->irq_queue);
	ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
	ering->seqno = ring->get_seqno(ring, false);
	ering->acthd = intel_ring_get_active_head(ring);
	ering->head = I915_READ_HEAD(ring);
	ering->tail = I915_READ_TAIL(ring);
	ering->ctl = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->cpu_ring_head = ring->buffer->head;
	ering->cpu_ring_tail = ring->buffer->tail;

	ering->hangcheck_score = ring->hangcheck.score;
	ering->hangcheck_action = ring->hangcheck.action;

	if (USES_PPGTT(dev)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));

		switch (INTEL_INFO(dev)->gen) {
		case 8:
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(ring, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(ring, i));
			}
			break;
		case 7:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(ring));
			break;
		case 6:
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(ring));
			break;
		}
	}
}
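
/*
 * CCID holds the GGTT address of the currently loaded HW context, so the
 * context object is found by matching that offset against the bound list.
 */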
static void i915_gem_record_active_context(struct intel_engine_cs *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		error->ring[i].pid = -1;

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring, &error->ring[i]);

		request = i915_gem_find_active_request(ring);
		if (request) {
			struct i915_address_space *vm;

			vm = request->ctx && request->ctx->ppgtt ?
				&request->ctx->ppgtt->base :
				&dev_priv->gtt.base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
			    ring->scratch.obj)
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
								      ring->scratch.obj);

			if (request->file_priv) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->file_priv->file->pid,
						PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}
		}

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);

		if (ring->status_page.obj)
			error->ring[i].hws_page =
				i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (vma->vm == vm && vma->pin_count > 0) {
				i++;
				break;
			}
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   bool wedged,
				   const char *error_msg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:0x%08x", ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  wedged ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev, bool wedged,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	i915_error_capture_msg(dev, error, wedged, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}