/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

enum {
        ACTIVE_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};

enum {
        RENDER_RING,
        BSD_RING,
        BLT_RING,
};

static const char *yesno(int v)
{
        return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
        B(is_i915g);
        B(is_i945gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(has_fbc);
        B(has_rc6);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
        B(supports_tv);
        B(has_bsd_ring);
        B(has_blt_ring);
#undef B

        return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

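/*
 * Emit a one-line summary of a GEM object: pin and tiling flags, size,
 * read/write domains and last rendering seqno, plus its flink name, fence
 * register, GTT offset and owning ring when present.
 */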
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
        if (obj->pin_mappable || obj->fault_mappable)
                seq_printf(m, " (mappable)");
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
                head = &dev_priv->mm.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj_priv, head, mm_list) {
                seq_printf(m, "   ");
                describe_obj(m, obj_priv);
                seq_printf(m, "\n");
                total_obj_size += obj_priv->base.size;
                total_gtt_size += obj_priv->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}

static int i915_gem_object_info(struct seq_file *m, void* data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
        seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
        seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
        seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
        seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
        seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
        seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
        seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
        seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
        seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
        seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}


static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if (obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if (obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret, count;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        count = 0;
        if (!list_empty(&dev_priv->render_ring.request_list)) {
                seq_printf(m, "Render requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->render_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->bsd_ring.request_list)) {
                seq_printf(m, "BSD requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->bsd_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        if (!list_empty(&dev_priv->blt_ring.request_list)) {
                seq_printf(m, "BLT requests:\n");
                list_for_each_entry(gem_request,
                                    &dev_priv->blt_ring.request_list,
                                    list) {
                        seq_printf(m, "    %d @ %d\n",
                                   gem_request->seqno,
                                   (int) (jiffies - gem_request->emitted_jiffies));
                }
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        if (count == 0)
                seq_printf(m, "No requests\n");

        return 0;
}

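/*
 * Print the current, waiter and IRQ seqno for a single ring; shared by the
 * seqno and interrupt debugfs entries below.
 */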
static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_ring_buffer *ring)
{
        if (ring->get_seqno) {
                seq_printf(m, "Current sequence (%s): %d\n",
                           ring->name, ring->get_seqno(ring));
                seq_printf(m, "Waiter sequence (%s): %d\n",
                           ring->name, ring->waiting_seqno);
                seq_printf(m, "IRQ sequence (%s): %d\n",
                           ring->name, ring->irq_seqno);
        }
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_ring_seqno_info(m, &dev_priv->render_ring);
        i915_ring_seqno_info(m, &dev_priv->bsd_ring);
        i915_ring_seqno_info(m, &dev_priv->blt_ring);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat: %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat: %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        i915_ring_seqno_info(m, &dev_priv->render_ring);
        i915_ring_seqno_info(m, &dev_priv->bsd_ring);
        i915_ring_seqno_info(m, &dev_priv->blt_ring);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                seq_printf(m, "Fenced object[%2d] = ", i);
                if (obj == NULL)
                        seq_printf(m, "unused");
                else
                        describe_obj(m, to_intel_bo(obj));
                seq_printf(m, "\n");
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

static void i915_dump_object(struct seq_file *m,
                             struct io_mapping *mapping,
                             struct drm_i915_gem_object *obj_priv)
{
        int page, page_count, i;

        page_count = obj_priv->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
                u32 *mem = io_mapping_map_wc(mapping,
                                             obj_priv->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
                io_mapping_unmap(mem);
        }
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                        seq_printf(m, "--- gtt_offset = 0x%08x\n",
                                   obj_priv->gtt_offset);
                        i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret;

        switch ((uintptr_t)node->info_ent->data) {
        case RENDER_RING: ring = &dev_priv->render_ring; break;
        case BSD_RING: ring = &dev_priv->bsd_ring; break;
        case BLT_RING: ring = &dev_priv->blt_ring; break;
        default: return -EINVAL;
        }

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!ring->gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = ring->virtual_start;
                uint32_t off;

                for (off = 0; off < ring->size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x : %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;

        switch ((uintptr_t)node->info_ent->data) {
        case RENDER_RING: ring = &dev_priv->render_ring; break;
        case BSD_RING: ring = &dev_priv->bsd_ring; break;
        case BLT_RING: ring = &dev_priv->blt_ring; break;
        default: return -EINVAL;
        }

        if (ring->size == 0)
                return 0;

        seq_printf(m, "Ring %s:\n", ring->name);
        seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
        seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
        seq_printf(m, " Size : %08x\n", ring->size);
        seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
        seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

        return 0;
}

static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

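/*
 * Dump the most recently captured GPU error state: timestamp, fault
 * registers, the list of active buffers, and hex dumps of the saved
 * batchbuffers and ringbuffer.
 */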
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "ERROR: 0x%08x\n", error->error);
        }
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
        if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
        }
        seq_printf(m, "seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}

static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_GEN5(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, to_intel_bo(fb->obj));
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, to_intel_bo(fb->obj));
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}

static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof (buf),
                       "wedged : %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        if (len > sizeof (buf))
                len = sizeof (buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
        .llseek = default_llseek,
};

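/*
 * Usage sketch (paths assume debugfs mounted in the usual place; the DRM
 * minor number may differ on a given system):
 *
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *
 * Writing a non-zero value marks the GPU as wedged and kicks the error
 * handling work, which is useful for exercising the reset paths.
 */
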
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

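/*
 * Table of read-only informational entries; drm_debugfs_create_files()
 * turns each one into a file under this minor's debugfs directory.
 */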
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
        {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING},
        {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING},
        {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING},
        {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING},
        {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */