blob: 748af58b0cea8a68b7a9aefa20b36b671539bca4 [file] [log] [blame]
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
29#include <linux/seq_file.h>
Chris Wilsonf3cd4742009-10-13 22:20:20 +010030#include <linux/debugfs.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Paul Gortmaker2d1a8a42011-08-30 18:16:33 -040032#include <linux/export.h>
David Howells760285e2012-10-02 18:01:07 +010033#include <drm/drmP.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010034#include "intel_drv.h"
Chris Wilsone5c65262010-11-01 11:35:28 +000035#include "intel_ringbuffer.h"
David Howells760285e2012-10-02 18:01:07 +010036#include <drm/i915_drm.h>
Ben Gamari20172632009-02-17 20:08:50 -050037#include "i915_drv.h"
38
39#define DRM_I915_RING_DEBUG 1
40
41
42#if defined(CONFIG_DEBUG_FS)
43
/*
 * Selector values passed via drm_info_list.data to pick which GEM object
 * list a debugfs entry should walk.
 */
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
Ben Gamari433e12f2009-02-17 20:08:51 -050049
/* Render a boolean-ish integer as a human readable "yes"/"no" string. */
static const char *yesno(int v)
{
	if (v)
		return "yes";
	return "no";
}
54
55static int i915_capabilities(struct seq_file *m, void *data)
56{
57 struct drm_info_node *node = (struct drm_info_node *) m->private;
58 struct drm_device *dev = node->minor->dev;
59 const struct intel_device_info *info = INTEL_INFO(dev);
60
61 seq_printf(m, "gen: %d\n", info->gen);
Paulo Zanoni03d00ac2011-10-14 18:17:41 -030062 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
Damien Lespiau79fc46d2013-04-23 16:37:17 +010063#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
64#define SEP_SEMICOLON ;
65 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
66#undef PRINT_FLAG
67#undef SEP_SEMICOLON
Chris Wilson70d39fe2010-08-25 16:03:34 +010068
69 return 0;
70}
Ben Gamari433e12f2009-02-17 20:08:51 -050071
Chris Wilson05394f32010-11-08 19:18:58 +000072static const char *get_pin_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000073{
Chris Wilson05394f32010-11-08 19:18:58 +000074 if (obj->user_pin_count > 0)
Chris Wilsona6172a82009-02-11 14:26:38 +000075 return "P";
Chris Wilson05394f32010-11-08 19:18:58 +000076 else if (obj->pin_count > 0)
Chris Wilsona6172a82009-02-11 14:26:38 +000077 return "p";
78 else
79 return " ";
80}
81
Chris Wilson05394f32010-11-08 19:18:58 +000082static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000083{
Akshay Joshi0206e352011-08-16 15:34:10 -040084 switch (obj->tiling_mode) {
85 default:
86 case I915_TILING_NONE: return " ";
87 case I915_TILING_X: return "X";
88 case I915_TILING_Y: return "Y";
89 }
Chris Wilsona6172a82009-02-11 14:26:38 +000090}
91
Ben Widawsky1d693bc2013-07-31 17:00:00 -070092static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
93{
94 return obj->has_global_gtt_mapping ? "g" : " ";
95}
96
/*
 * Emit a one-line human readable description of a GEM object: kernel
 * pointer, pin/tiling/global flags, size, read/write domains, last
 * read/write/fence seqnos, cache level, dirty/purgeable state, then a
 * series of optional "(...)" annotations (flink name, pin count, fence
 * register, per-VMA GTT bindings, stolen offset, mappability, last ring).
 *
 * Caller must hold struct_mutex so the object state is stable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	/* Each VMA prints either "(ppgtt ...)" or "(ggtt ...)"; the prefix
	 * and the "gtt offset..." tail are deliberately split across two
	 * seq_* calls to share the common suffix. */
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		/* Build a 1-2 char flag string: 'p' = pinned mappable,
		 * 'f' = fault mappable. */
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
143
/*
 * debugfs: list every object on either the global GTT active or inactive
 * list (selected via the ACTIVE_LIST/INACTIVE_LIST value stashed in
 * info_ent->data), one describe_obj() line each, plus totals.
 *
 * Returns 0 on success, -EINVAL for an unknown list selector, or the
 * error from mutex_lock_interruptible().
 */
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
189
/*
 * Walk a GEM object list and accumulate into the caller's local
 * size/count (all objects) and mappable_size/mappable_count (only
 * objects that are map-and-fenceable) variables.  Implemented as a
 * macro because the member linking the objects differs per list.
 */
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
Chris Wilson6299f992010-11-24 12:23:44 +0000200
/* Per-drm_file accumulator filled in by per_file_stats(). */
struct file_stats {
	int count;              /* number of objects owned by the file */
	size_t total, active, inactive, unbound;  /* byte totals per state */
};

/*
 * idr_for_each() callback: classify one GEM object into the file_stats
 * buckets.  A GGTT-bound object is "active" when it is still on a ring
 * list, "inactive" otherwise; anything unbound but on the global list
 * counts as "unbound".  Always returns 0 so iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->base.size;

	if (i915_gem_obj_ggtt_bound(obj)) {
		if (!list_empty(&obj->ring_list))
			stats->active += obj->base.size;
		else
			stats->inactive += obj->base.size;
	} else {
		if (!list_empty(&obj->global_list))
			stats->unbound += obj->base.size;
	}

	return 0;
}
226
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100227static int i915_gem_object_info(struct seq_file *m, void *data)
Chris Wilson73aa8082010-09-30 11:46:12 +0100228{
229 struct drm_info_node *node = (struct drm_info_node *) m->private;
230 struct drm_device *dev = node->minor->dev;
231 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb7abb712012-08-20 11:33:30 +0200232 u32 count, mappable_count, purgeable_count;
233 size_t size, mappable_size, purgeable_size;
Chris Wilson6299f992010-11-24 12:23:44 +0000234 struct drm_i915_gem_object *obj;
Ben Widawsky5cef07e2013-07-16 16:50:08 -0700235 struct i915_address_space *vm = &dev_priv->gtt.base;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100236 struct drm_file *file;
Chris Wilson73aa8082010-09-30 11:46:12 +0100237 int ret;
238
239 ret = mutex_lock_interruptible(&dev->struct_mutex);
240 if (ret)
241 return ret;
242
Chris Wilson6299f992010-11-24 12:23:44 +0000243 seq_printf(m, "%u objects, %zu bytes\n",
244 dev_priv->mm.object_count,
245 dev_priv->mm.object_memory);
246
247 size = count = mappable_size = mappable_count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -0700248 count_objects(&dev_priv->mm.bound_list, global_list);
Chris Wilson6299f992010-11-24 12:23:44 +0000249 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
250 count, mappable_count, size, mappable_size);
251
252 size = count = mappable_size = mappable_count = 0;
Ben Widawsky5cef07e2013-07-16 16:50:08 -0700253 count_objects(&vm->active_list, mm_list);
Chris Wilson6299f992010-11-24 12:23:44 +0000254 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
255 count, mappable_count, size, mappable_size);
256
257 size = count = mappable_size = mappable_count = 0;
Ben Widawsky5cef07e2013-07-16 16:50:08 -0700258 count_objects(&vm->inactive_list, mm_list);
Chris Wilson6299f992010-11-24 12:23:44 +0000259 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
260 count, mappable_count, size, mappable_size);
261
Chris Wilsonb7abb712012-08-20 11:33:30 +0200262 size = count = purgeable_size = purgeable_count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -0700263 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
Chris Wilson6c085a72012-08-20 11:40:46 +0200264 size += obj->base.size, ++count;
Chris Wilsonb7abb712012-08-20 11:33:30 +0200265 if (obj->madv == I915_MADV_DONTNEED)
266 purgeable_size += obj->base.size, ++purgeable_count;
267 }
Chris Wilson6c085a72012-08-20 11:40:46 +0200268 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
269
Chris Wilson6299f992010-11-24 12:23:44 +0000270 size = count = mappable_size = mappable_count = 0;
Ben Widawsky35c20a62013-05-31 11:28:48 -0700271 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
Chris Wilson6299f992010-11-24 12:23:44 +0000272 if (obj->fault_mappable) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700273 size += i915_gem_obj_ggtt_size(obj);
Chris Wilson6299f992010-11-24 12:23:44 +0000274 ++count;
275 }
276 if (obj->pin_mappable) {
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700277 mappable_size += i915_gem_obj_ggtt_size(obj);
Chris Wilson6299f992010-11-24 12:23:44 +0000278 ++mappable_count;
279 }
Chris Wilsonb7abb712012-08-20 11:33:30 +0200280 if (obj->madv == I915_MADV_DONTNEED) {
281 purgeable_size += obj->base.size;
282 ++purgeable_count;
283 }
Chris Wilson6299f992010-11-24 12:23:44 +0000284 }
Chris Wilsonb7abb712012-08-20 11:33:30 +0200285 seq_printf(m, "%u purgeable objects, %zu bytes\n",
286 purgeable_count, purgeable_size);
Chris Wilson6299f992010-11-24 12:23:44 +0000287 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
288 mappable_count, mappable_size);
289 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
290 count, size);
291
Ben Widawsky93d18792013-01-17 12:45:17 -0800292 seq_printf(m, "%zu [%lu] gtt total\n",
Ben Widawsky853ba5d2013-07-16 16:50:05 -0700293 dev_priv->gtt.base.total,
294 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
Chris Wilson73aa8082010-09-30 11:46:12 +0100295
Damien Lespiau267f0c92013-06-24 22:59:48 +0100296 seq_putc(m, '\n');
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100297 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
298 struct file_stats stats;
299
300 memset(&stats, 0, sizeof(stats));
301 idr_for_each(&file->object_idr, per_file_stats, &stats);
302 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
303 get_pid_task(file->pid, PIDTYPE_PID)->comm,
304 stats.count,
305 stats.total,
306 stats.active,
307 stats.inactive,
308 stats.unbound);
309 }
310
Chris Wilson73aa8082010-09-30 11:46:12 +0100311 mutex_unlock(&dev->struct_mutex);
312
313 return 0;
314}
315
/*
 * debugfs: list every bound GEM object (or only pinned ones when
 * info_ent->data == PINNED_LIST), one describe_obj() line each, plus
 * totals.
 *
 * Returns 0 on success or the error from mutex_lock_interruptible().
 */
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		/* PINNED_LIST mode skips anything that is not pinned. */
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
350
/*
 * debugfs: report the state of any pending page flip on each CRTC —
 * whether a flip is queued or waiting for vsync, the stall-check state,
 * and the GTT offsets of the old and new framebuffer objects.
 *
 * Always returns 0; the unpin_work state is sampled under event_lock.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		/* event_lock protects crtc->unpin_work against the flip
		 * completion interrupt. */
		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
400
/*
 * debugfs: dump the outstanding GPU requests on every ring — each
 * request's seqno and its age in jiffies since emission.
 *
 * Returns 0 on success or the error from mutex_lock_interruptible().
 */
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* count tracks rings with pending requests, not requests. */
	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
436
Chris Wilsonb2223492010-10-27 15:27:33 +0100437static void i915_ring_seqno_info(struct seq_file *m,
438 struct intel_ring_buffer *ring)
439{
440 if (ring->get_seqno) {
Mika Kuoppala43a7b922012-12-04 15:12:01 +0200441 seq_printf(m, "Current sequence (%s): %u\n",
Chris Wilsonb2eadbc2012-08-09 10:58:30 +0100442 ring->name, ring->get_seqno(ring, false));
Chris Wilsonb2223492010-10-27 15:27:33 +0100443 }
444}
445
/*
 * debugfs: print the current hardware seqno of every ring.
 *
 * Returns 0 on success or the error from mutex_lock_interruptible().
 */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
465
466
467static int i915_interrupt_info(struct seq_file *m, void *data)
468{
469 struct drm_info_node *node = (struct drm_info_node *) m->private;
470 struct drm_device *dev = node->minor->dev;
471 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100472 struct intel_ring_buffer *ring;
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800473 int ret, i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100474
475 ret = mutex_lock_interruptible(&dev->struct_mutex);
476 if (ret)
477 return ret;
Ben Gamari20172632009-02-17 20:08:50 -0500478
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700479 if (IS_VALLEYVIEW(dev)) {
480 seq_printf(m, "Display IER:\t%08x\n",
481 I915_READ(VLV_IER));
482 seq_printf(m, "Display IIR:\t%08x\n",
483 I915_READ(VLV_IIR));
484 seq_printf(m, "Display IIR_RW:\t%08x\n",
485 I915_READ(VLV_IIR_RW));
486 seq_printf(m, "Display IMR:\t%08x\n",
487 I915_READ(VLV_IMR));
488 for_each_pipe(pipe)
489 seq_printf(m, "Pipe %c stat:\t%08x\n",
490 pipe_name(pipe),
491 I915_READ(PIPESTAT(pipe)));
492
493 seq_printf(m, "Master IER:\t%08x\n",
494 I915_READ(VLV_MASTER_IER));
495
496 seq_printf(m, "Render IER:\t%08x\n",
497 I915_READ(GTIER));
498 seq_printf(m, "Render IIR:\t%08x\n",
499 I915_READ(GTIIR));
500 seq_printf(m, "Render IMR:\t%08x\n",
501 I915_READ(GTIMR));
502
503 seq_printf(m, "PM IER:\t\t%08x\n",
504 I915_READ(GEN6_PMIER));
505 seq_printf(m, "PM IIR:\t\t%08x\n",
506 I915_READ(GEN6_PMIIR));
507 seq_printf(m, "PM IMR:\t\t%08x\n",
508 I915_READ(GEN6_PMIMR));
509
510 seq_printf(m, "Port hotplug:\t%08x\n",
511 I915_READ(PORT_HOTPLUG_EN));
512 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
513 I915_READ(VLV_DPFLIPSTAT));
514 seq_printf(m, "DPINVGTT:\t%08x\n",
515 I915_READ(DPINVGTT));
516
517 } else if (!HAS_PCH_SPLIT(dev)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800518 seq_printf(m, "Interrupt enable: %08x\n",
519 I915_READ(IER));
520 seq_printf(m, "Interrupt identity: %08x\n",
521 I915_READ(IIR));
522 seq_printf(m, "Interrupt mask: %08x\n",
523 I915_READ(IMR));
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800524 for_each_pipe(pipe)
525 seq_printf(m, "Pipe %c stat: %08x\n",
526 pipe_name(pipe),
527 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800528 } else {
529 seq_printf(m, "North Display Interrupt enable: %08x\n",
530 I915_READ(DEIER));
531 seq_printf(m, "North Display Interrupt identity: %08x\n",
532 I915_READ(DEIIR));
533 seq_printf(m, "North Display Interrupt mask: %08x\n",
534 I915_READ(DEIMR));
535 seq_printf(m, "South Display Interrupt enable: %08x\n",
536 I915_READ(SDEIER));
537 seq_printf(m, "South Display Interrupt identity: %08x\n",
538 I915_READ(SDEIIR));
539 seq_printf(m, "South Display Interrupt mask: %08x\n",
540 I915_READ(SDEIMR));
541 seq_printf(m, "Graphics Interrupt enable: %08x\n",
542 I915_READ(GTIER));
543 seq_printf(m, "Graphics Interrupt identity: %08x\n",
544 I915_READ(GTIIR));
545 seq_printf(m, "Graphics Interrupt mask: %08x\n",
546 I915_READ(GTIMR));
547 }
Ben Gamari20172632009-02-17 20:08:50 -0500548 seq_printf(m, "Interrupts received: %d\n",
549 atomic_read(&dev_priv->irq_received));
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100550 for_each_ring(ring, dev_priv, i) {
Jesse Barnesda64c6f2011-08-09 09:17:46 -0700551 if (IS_GEN6(dev) || IS_GEN7(dev)) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100552 seq_printf(m,
553 "Graphics Interrupt mask (%s): %08x\n",
554 ring->name, I915_READ_IMR(ring));
Chris Wilson9862e602011-01-04 22:22:17 +0000555 }
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100556 i915_ring_seqno_info(m, ring);
Chris Wilson9862e602011-01-04 22:22:17 +0000557 }
Chris Wilsonde227ef2010-07-03 07:58:38 +0100558 mutex_unlock(&dev->struct_mutex);
559
Ben Gamari20172632009-02-17 20:08:50 -0500560 return 0;
561}
562
/*
 * debugfs: dump every fence register slot — its pin count and the
 * object currently occupying it (or "unused").
 *
 * Returns 0 on success or the error from mutex_lock_interruptible().
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
591
/*
 * debugfs: hex-dump the hardware status page of the ring selected by
 * info_ent->data, four u32 words per line.
 *
 * Returns 0 (also when the ring has no status page mapped).
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	/* NOTE(review): 4096/sizeof(u32)/4 == 256, so with i += 4 this only
	 * dumps the first 1KiB of the 4KiB status page — confirm whether the
	 * extra /4 in the bound is intentional. */
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
613
/*
 * Write handler for the error_state debugfs file: any write, regardless
 * of content (ubuf/cnt are deliberately ignored), clears the captured
 * GPU error state.
 *
 * Returns cnt (claiming the whole write) on success, or the error from
 * mutex_lock_interruptible().
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
635
636static int i915_error_state_open(struct inode *inode, struct file *file)
637{
638 struct drm_device *dev = inode->i_private;
Daniel Vetterd5442302012-04-27 15:17:40 +0200639 struct i915_error_state_file_priv *error_priv;
Daniel Vetterd5442302012-04-27 15:17:40 +0200640
641 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
642 if (!error_priv)
643 return -ENOMEM;
644
645 error_priv->dev = dev;
646
Mika Kuoppala95d5bfb2013-06-06 15:18:40 +0300647 i915_error_state_get(dev, error_priv);
Daniel Vetterd5442302012-04-27 15:17:40 +0200648
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300649 file->private_data = error_priv;
650
651 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200652}
653
654static int i915_error_state_release(struct inode *inode, struct file *file)
655{
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300656 struct i915_error_state_file_priv *error_priv = file->private_data;
Daniel Vetterd5442302012-04-27 15:17:40 +0200657
Mika Kuoppala95d5bfb2013-06-06 15:18:40 +0300658 i915_error_state_put(error_priv);
Daniel Vetterd5442302012-04-27 15:17:40 +0200659 kfree(error_priv);
660
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300661 return 0;
662}
663
664static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
665 size_t count, loff_t *pos)
666{
667 struct i915_error_state_file_priv *error_priv = file->private_data;
668 struct drm_i915_error_state_buf error_str;
669 loff_t tmp_pos = 0;
670 ssize_t ret_count = 0;
Mika Kuoppala4dc955f2013-06-06 15:18:41 +0300671 int ret;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300672
Mika Kuoppala4dc955f2013-06-06 15:18:41 +0300673 ret = i915_error_state_buf_init(&error_str, count, *pos);
674 if (ret)
675 return ret;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300676
Mika Kuoppalafc16b482013-06-06 15:18:39 +0300677 ret = i915_error_state_to_str(&error_str, error_priv);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300678 if (ret)
679 goto out;
680
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300681 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
682 error_str.buf,
683 error_str.bytes);
684
685 if (ret_count < 0)
686 ret = ret_count;
687 else
688 *pos = error_str.start + ret_count;
689out:
Mika Kuoppala4dc955f2013-06-06 15:18:41 +0300690 i915_error_state_buf_release(&error_str);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300691 return ret ?: ret_count;
Daniel Vetterd5442302012-04-27 15:17:40 +0200692}
693
694static const struct file_operations i915_error_state_fops = {
695 .owner = THIS_MODULE,
696 .open = i915_error_state_open,
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300697 .read = i915_error_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +0200698 .write = i915_error_state_write,
699 .llseek = default_llseek,
700 .release = i915_error_state_release,
701};
702
Kees Cook647416f2013-03-10 14:10:06 -0700703static int
704i915_next_seqno_get(void *data, u64 *val)
Mika Kuoppala40633212012-12-04 15:12:00 +0200705{
Kees Cook647416f2013-03-10 14:10:06 -0700706 struct drm_device *dev = data;
Mika Kuoppala40633212012-12-04 15:12:00 +0200707 drm_i915_private_t *dev_priv = dev->dev_private;
Mika Kuoppala40633212012-12-04 15:12:00 +0200708 int ret;
709
710 ret = mutex_lock_interruptible(&dev->struct_mutex);
711 if (ret)
712 return ret;
713
Kees Cook647416f2013-03-10 14:10:06 -0700714 *val = dev_priv->next_seqno;
Mika Kuoppala40633212012-12-04 15:12:00 +0200715 mutex_unlock(&dev->struct_mutex);
716
Kees Cook647416f2013-03-10 14:10:06 -0700717 return 0;
Mika Kuoppala40633212012-12-04 15:12:00 +0200718}
719
Kees Cook647416f2013-03-10 14:10:06 -0700720static int
721i915_next_seqno_set(void *data, u64 val)
Mika Kuoppala40633212012-12-04 15:12:00 +0200722{
Kees Cook647416f2013-03-10 14:10:06 -0700723 struct drm_device *dev = data;
Mika Kuoppala40633212012-12-04 15:12:00 +0200724 int ret;
725
Mika Kuoppala40633212012-12-04 15:12:00 +0200726 ret = mutex_lock_interruptible(&dev->struct_mutex);
727 if (ret)
728 return ret;
729
Mika Kuoppalae94fbaa2012-12-19 11:13:09 +0200730 ret = i915_gem_set_seqno(dev, val);
Mika Kuoppala40633212012-12-04 15:12:00 +0200731 mutex_unlock(&dev->struct_mutex);
732
Kees Cook647416f2013-03-10 14:10:06 -0700733 return ret;
Mika Kuoppala40633212012-12-04 15:12:00 +0200734}
735
/*
 * Expose next_seqno as a simple debugfs attribute, printed/parsed in hex.
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +0200739
Jesse Barnesf97108d2010-01-29 11:27:07 -0800740static int i915_rstdby_delays(struct seq_file *m, void *unused)
741{
742 struct drm_info_node *node = (struct drm_info_node *) m->private;
743 struct drm_device *dev = node->minor->dev;
744 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky616fdb52011-10-05 11:44:54 -0700745 u16 crstanddelay;
746 int ret;
747
748 ret = mutex_lock_interruptible(&dev->struct_mutex);
749 if (ret)
750 return ret;
751
752 crstanddelay = I915_READ16(CRSTANDVID);
753
754 mutex_unlock(&dev->struct_mutex);
Jesse Barnesf97108d2010-01-29 11:27:07 -0800755
756 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
757
758 return 0;
759}
760
/*
 * debugfs: report current GPU frequency / P-state information, with a
 * per-generation register layout: ILK (gen5), SNB/IVB/HSW (gen6/7
 * excluding Valleyview), and Valleyview via punit reads.
 */
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		/* Ironlake: requested/current state lives in MEMSWCTL/MEMSTAT. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		/* Hold forcewake for the duration of the RPSTAT reads. */
		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		/* Haswell moved the CAGF field within RPSTAT1. */
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		/* RP_STATE_CAP packs RPN/RP1/RP0 ratios into byte fields. */
		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts, val;

		/* Punit access is serialized by rps.hw_lock, not struct_mutex. */
		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq, val));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv->mem_freq,
					(freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	return 0;
}
872
873static int i915_delayfreq_table(struct seq_file *m, void *unused)
874{
875 struct drm_info_node *node = (struct drm_info_node *) m->private;
876 struct drm_device *dev = node->minor->dev;
877 drm_i915_private_t *dev_priv = dev->dev_private;
878 u32 delayfreq;
Ben Widawsky616fdb52011-10-05 11:44:54 -0700879 int ret, i;
880
881 ret = mutex_lock_interruptible(&dev->struct_mutex);
882 if (ret)
883 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -0800884
885 for (i = 0; i < 16; i++) {
886 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
Jesse Barnes7648fa92010-05-20 14:28:11 -0700887 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
888 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
Jesse Barnesf97108d2010-01-29 11:27:07 -0800889 }
890
Ben Widawsky616fdb52011-10-05 11:44:54 -0700891 mutex_unlock(&dev->struct_mutex);
892
Jesse Barnesf97108d2010-01-29 11:27:07 -0800893 return 0;
894}
895
/* Convert a 25mV-step map value to millivolts, counting down from 1250mV. */
static inline int MAP_TO_MV(int map)
{
	int step_mv = map * 25;

	return 1250 - step_mv;
}
900
901static int i915_inttoext_table(struct seq_file *m, void *unused)
902{
903 struct drm_info_node *node = (struct drm_info_node *) m->private;
904 struct drm_device *dev = node->minor->dev;
905 drm_i915_private_t *dev_priv = dev->dev_private;
906 u32 inttoext;
Ben Widawsky616fdb52011-10-05 11:44:54 -0700907 int ret, i;
908
909 ret = mutex_lock_interruptible(&dev->struct_mutex);
910 if (ret)
911 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -0800912
913 for (i = 1; i <= 32; i++) {
914 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
915 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
916 }
917
Ben Widawsky616fdb52011-10-05 11:44:54 -0700918 mutex_unlock(&dev->struct_mutex);
919
Jesse Barnesf97108d2010-01-29 11:27:07 -0800920 return 0;
921}
922
/*
 * Report Ironlake render-standby (DRPC) state: memory mode control,
 * standby VIDs and the current RSx power state.  Registers are sampled
 * under struct_mutex, then printed after unlock.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	/* RCX_SW_EXIT set means software has forced an exit from standby. */
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
989
/*
 * Report gen6+ RC6 (render C-state) information: RP/RC control registers,
 * the current RC state, residency counters and RC6 voltages read from the
 * pcode mailbox.
 */
static int gen6_drpc_info(struct seq_file *m)
{

	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* Sample the forcewake refcount under the uncore spinlock. */
	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			 "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	/* Raw readl + manual trace, to avoid I915_READ's forcewake handling. */
	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	/* Pcode mailbox access is serialized by rps.hw_lock. */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	/* rc6vids packs three 8-bit VID fields; decode each to millivolts. */
	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}
1086
1087static int i915_drpc_info(struct seq_file *m, void *unused)
1088{
1089 struct drm_info_node *node = (struct drm_info_node *) m->private;
1090 struct drm_device *dev = node->minor->dev;
1091
1092 if (IS_GEN6(dev) || IS_GEN7(dev))
1093 return gen6_drpc_info(m);
1094 else
1095 return ironlake_drpc_info(m);
1096}
1097
/*
 * debugfs: report whether framebuffer compression is active, and if not,
 * translate the recorded no_fbc_reason into a human-readable explanation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			/* NOTE(review): "actived" looks like a typo for
			 * "activated" in this user-visible string. */
			seq_puts(m, "FBC actived, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}
	return 0;
}
1154
Paulo Zanoni92d44622013-05-31 16:33:24 -03001155static int i915_ips_status(struct seq_file *m, void *unused)
1156{
1157 struct drm_info_node *node = (struct drm_info_node *) m->private;
1158 struct drm_device *dev = node->minor->dev;
1159 struct drm_i915_private *dev_priv = dev->dev_private;
1160
Damien Lespiauf5adf942013-06-24 18:29:34 +01001161 if (!HAS_IPS(dev)) {
Paulo Zanoni92d44622013-05-31 16:33:24 -03001162 seq_puts(m, "not supported\n");
1163 return 0;
1164 }
1165
1166 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1167 seq_puts(m, "enabled\n");
1168 else
1169 seq_puts(m, "disabled\n");
1170
1171 return 0;
1172}
1173
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001174static int i915_sr_status(struct seq_file *m, void *unused)
1175{
1176 struct drm_info_node *node = (struct drm_info_node *) m->private;
1177 struct drm_device *dev = node->minor->dev;
1178 drm_i915_private_t *dev_priv = dev->dev_private;
1179 bool sr_enabled = false;
1180
Yuanhan Liu13982612010-12-15 15:42:31 +08001181 if (HAS_PCH_SPLIT(dev))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001182 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Chris Wilsona6c45cf2010-09-17 00:32:17 +01001183 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001184 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1185 else if (IS_I915GM(dev))
1186 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1187 else if (IS_PINEVIEW(dev))
1188 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1189
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001190 seq_printf(m, "self-refresh: %s\n",
1191 sr_enabled ? "enabled" : "disabled");
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001192
1193 return 0;
1194}
1195
Jesse Barnes7648fa92010-05-20 14:28:11 -07001196static int i915_emon_status(struct seq_file *m, void *unused)
1197{
1198 struct drm_info_node *node = (struct drm_info_node *) m->private;
1199 struct drm_device *dev = node->minor->dev;
1200 drm_i915_private_t *dev_priv = dev->dev_private;
1201 unsigned long temp, chipset, gfx;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001202 int ret;
1203
Chris Wilson582be6b2012-04-30 19:35:02 +01001204 if (!IS_GEN5(dev))
1205 return -ENODEV;
1206
Chris Wilsonde227ef2010-07-03 07:58:38 +01001207 ret = mutex_lock_interruptible(&dev->struct_mutex);
1208 if (ret)
1209 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001210
1211 temp = i915_mch_val(dev_priv);
1212 chipset = i915_chipset_val(dev_priv);
1213 gfx = i915_gfx_val(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +01001214 mutex_unlock(&dev->struct_mutex);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001215
1216 seq_printf(m, "GMCH temp: %ld\n", temp);
1217 seq_printf(m, "Chipset power: %ld\n", chipset);
1218 seq_printf(m, "GFX power: %ld\n", gfx);
1219 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1220
1221 return 0;
1222}
1223
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001224static int i915_ring_freq_table(struct seq_file *m, void *unused)
1225{
1226 struct drm_info_node *node = (struct drm_info_node *) m->private;
1227 struct drm_device *dev = node->minor->dev;
1228 drm_i915_private_t *dev_priv = dev->dev_private;
1229 int ret;
1230 int gpu_freq, ia_freq;
1231
Jesse Barnes1c70c0c2011-06-29 13:34:36 -07001232 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001233 seq_puts(m, "unsupported on this chipset\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001234 return 0;
1235 }
1236
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001237 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001238 if (ret)
1239 return ret;
1240
Damien Lespiau267f0c92013-06-24 22:59:48 +01001241 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001242
Daniel Vetterc6a828d2012-08-08 23:35:35 +02001243 for (gpu_freq = dev_priv->rps.min_delay;
1244 gpu_freq <= dev_priv->rps.max_delay;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001245 gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001246 ia_freq = gpu_freq;
1247 sandybridge_pcode_read(dev_priv,
1248 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1249 &ia_freq);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001250 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1251 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1252 ((ia_freq >> 0) & 0xff) * 100,
1253 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001254 }
1255
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001256 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001257
1258 return 0;
1259}
1260
Jesse Barnes7648fa92010-05-20 14:28:11 -07001261static int i915_gfxec(struct seq_file *m, void *unused)
1262{
1263 struct drm_info_node *node = (struct drm_info_node *) m->private;
1264 struct drm_device *dev = node->minor->dev;
1265 drm_i915_private_t *dev_priv = dev->dev_private;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001266 int ret;
1267
1268 ret = mutex_lock_interruptible(&dev->struct_mutex);
1269 if (ret)
1270 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001271
1272 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1273
Ben Widawsky616fdb52011-10-05 11:44:54 -07001274 mutex_unlock(&dev->struct_mutex);
1275
Jesse Barnes7648fa92010-05-20 14:28:11 -07001276 return 0;
1277}
1278
Chris Wilson44834a62010-08-19 16:09:23 +01001279static int i915_opregion(struct seq_file *m, void *unused)
1280{
1281 struct drm_info_node *node = (struct drm_info_node *) m->private;
1282 struct drm_device *dev = node->minor->dev;
1283 drm_i915_private_t *dev_priv = dev->dev_private;
1284 struct intel_opregion *opregion = &dev_priv->opregion;
Daniel Vetter0d38f002012-04-21 22:49:10 +02001285 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
Chris Wilson44834a62010-08-19 16:09:23 +01001286 int ret;
1287
Daniel Vetter0d38f002012-04-21 22:49:10 +02001288 if (data == NULL)
1289 return -ENOMEM;
1290
Chris Wilson44834a62010-08-19 16:09:23 +01001291 ret = mutex_lock_interruptible(&dev->struct_mutex);
1292 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001293 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001294
Daniel Vetter0d38f002012-04-21 22:49:10 +02001295 if (opregion->header) {
1296 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1297 seq_write(m, data, OPREGION_SIZE);
1298 }
Chris Wilson44834a62010-08-19 16:09:23 +01001299
1300 mutex_unlock(&dev->struct_mutex);
1301
Daniel Vetter0d38f002012-04-21 22:49:10 +02001302out:
1303 kfree(data);
Chris Wilson44834a62010-08-19 16:09:23 +01001304 return 0;
1305}
1306
/*
 * debugfs: describe every framebuffer known to the device — first the
 * fbdev/fbcon framebuffer (under mode_config.mutex), then all user
 * framebuffers on mode_config.fb_list (under mode_config.fb_lock),
 * skipping the fbcon one so it is not listed twice.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
	mutex_unlock(&dev->mode_config.mutex);

	/* The framebuffer list is protected by its own lock. */
	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		/* Already printed above as the fbcon framebuffer. */
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
1351
/*
 * debugfs: describe the GEM objects backing the hardware contexts — the
 * ILK power/render contexts (when allocated) and each ring's default
 * context.  Iteration happens under mode_config.mutex.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	/* One default context object per ring, when contexts are in use. */
	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_putc(m, '\n');
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
1388
Ben Widawsky6d794d42011-04-25 11:25:56 -07001389static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1390{
1391 struct drm_info_node *node = (struct drm_info_node *) m->private;
1392 struct drm_device *dev = node->minor->dev;
1393 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter9f1f46a2011-12-14 13:57:03 +01001394 unsigned forcewake_count;
Ben Widawsky6d794d42011-04-25 11:25:56 -07001395
Chris Wilson907b28c2013-07-19 20:36:52 +01001396 spin_lock_irq(&dev_priv->uncore.lock);
1397 forcewake_count = dev_priv->uncore.forcewake_count;
1398 spin_unlock_irq(&dev_priv->uncore.lock);
Daniel Vetter9f1f46a2011-12-14 13:57:03 +01001399
1400 seq_printf(m, "forcewake count = %u\n", forcewake_count);
Ben Widawsky6d794d42011-04-25 11:25:56 -07001401
1402 return 0;
1403}
1404
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001405static const char *swizzle_string(unsigned swizzle)
1406{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001407 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001408 case I915_BIT_6_SWIZZLE_NONE:
1409 return "none";
1410 case I915_BIT_6_SWIZZLE_9:
1411 return "bit9";
1412 case I915_BIT_6_SWIZZLE_9_10:
1413 return "bit9/bit10";
1414 case I915_BIT_6_SWIZZLE_9_11:
1415 return "bit9/bit11";
1416 case I915_BIT_6_SWIZZLE_9_10_11:
1417 return "bit9/bit10/bit11";
1418 case I915_BIT_6_SWIZZLE_9_17:
1419 return "bit9/bit17";
1420 case I915_BIT_6_SWIZZLE_9_10_17:
1421 return "bit9/bit10/bit17";
1422 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001423 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001424 }
1425
1426 return "bug";
1427}
1428
1429static int i915_swizzle_info(struct seq_file *m, void *data)
1430{
1431 struct drm_info_node *node = (struct drm_info_node *) m->private;
1432 struct drm_device *dev = node->minor->dev;
1433 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001434 int ret;
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001435
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001436 ret = mutex_lock_interruptible(&dev->struct_mutex);
1437 if (ret)
1438 return ret;
1439
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001440 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1441 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1442 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1443 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1444
1445 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1446 seq_printf(m, "DDC = 0x%08x\n",
1447 I915_READ(DCC));
1448 seq_printf(m, "C0DRB3 = 0x%04x\n",
1449 I915_READ16(C0DRB3));
1450 seq_printf(m, "C1DRB3 = 0x%04x\n",
1451 I915_READ16(C1DRB3));
Daniel Vetter3fa7d232012-01-31 16:47:56 +01001452 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
1453 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1454 I915_READ(MAD_DIMM_C0));
1455 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1456 I915_READ(MAD_DIMM_C1));
1457 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1458 I915_READ(MAD_DIMM_C2));
1459 seq_printf(m, "TILECTL = 0x%08x\n",
1460 I915_READ(TILECTL));
1461 seq_printf(m, "ARB_MODE = 0x%08x\n",
1462 I915_READ(ARB_MODE));
1463 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1464 I915_READ(DISP_ARB_CTL));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001465 }
1466 mutex_unlock(&dev->struct_mutex);
1467
1468 return 0;
1469}
1470
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01001471static int i915_ppgtt_info(struct seq_file *m, void *data)
1472{
1473 struct drm_info_node *node = (struct drm_info_node *) m->private;
1474 struct drm_device *dev = node->minor->dev;
1475 struct drm_i915_private *dev_priv = dev->dev_private;
1476 struct intel_ring_buffer *ring;
1477 int i, ret;
1478
1479
1480 ret = mutex_lock_interruptible(&dev->struct_mutex);
1481 if (ret)
1482 return ret;
1483 if (INTEL_INFO(dev)->gen == 6)
1484 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1485
Chris Wilsona2c7f6f2012-09-01 20:51:22 +01001486 for_each_ring(ring, dev_priv, i) {
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01001487 seq_printf(m, "%s\n", ring->name);
1488 if (INTEL_INFO(dev)->gen == 7)
1489 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1490 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1491 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1492 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1493 }
1494 if (dev_priv->mm.aliasing_ppgtt) {
1495 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1496
Damien Lespiau267f0c92013-06-24 22:59:48 +01001497 seq_puts(m, "aliasing PPGTT:\n");
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01001498 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1499 }
1500 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1501 mutex_unlock(&dev->struct_mutex);
1502
1503 return 0;
1504}
1505
Jesse Barnes57f350b2012-03-28 13:39:25 -07001506static int i915_dpio_info(struct seq_file *m, void *data)
1507{
1508 struct drm_info_node *node = (struct drm_info_node *) m->private;
1509 struct drm_device *dev = node->minor->dev;
1510 struct drm_i915_private *dev_priv = dev->dev_private;
1511 int ret;
1512
1513
1514 if (!IS_VALLEYVIEW(dev)) {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001515 seq_puts(m, "unsupported\n");
Jesse Barnes57f350b2012-03-28 13:39:25 -07001516 return 0;
1517 }
1518
Daniel Vetter09153002012-12-12 14:06:44 +01001519 ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
Jesse Barnes57f350b2012-03-28 13:39:25 -07001520 if (ret)
1521 return ret;
1522
1523 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1524
1525 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001526 vlv_dpio_read(dev_priv, _DPIO_DIV_A));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001527 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001528 vlv_dpio_read(dev_priv, _DPIO_DIV_B));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001529
1530 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001531 vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001532 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001533 vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001534
1535 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001536 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001537 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001538 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001539
Ville Syrjälä4abb2c32013-06-14 14:02:53 +03001540 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1541 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
1542 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1543 vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001544
1545 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
Jani Nikulaae992582013-05-22 15:36:19 +03001546 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
Jesse Barnes57f350b2012-03-28 13:39:25 -07001547
Daniel Vetter09153002012-12-12 14:06:44 +01001548 mutex_unlock(&dev_priv->dpio_lock);
Jesse Barnes57f350b2012-03-28 13:39:25 -07001549
1550 return 0;
1551}
1552
Ben Widawsky63573eb2013-07-04 11:02:07 -07001553static int i915_llc(struct seq_file *m, void *data)
1554{
1555 struct drm_info_node *node = (struct drm_info_node *) m->private;
1556 struct drm_device *dev = node->minor->dev;
1557 struct drm_i915_private *dev_priv = dev->dev_private;
1558
1559 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1560 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1561 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1562
1563 return 0;
1564}
1565
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03001566static int i915_edp_psr_status(struct seq_file *m, void *data)
1567{
1568 struct drm_info_node *node = m->private;
1569 struct drm_device *dev = node->minor->dev;
1570 struct drm_i915_private *dev_priv = dev->dev_private;
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03001571 u32 psrstat, psrperf;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03001572
1573 if (!IS_HASWELL(dev)) {
1574 seq_puts(m, "PSR not supported on this platform\n");
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03001575 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1576 seq_puts(m, "PSR enabled\n");
1577 } else {
1578 seq_puts(m, "PSR disabled: ");
1579 switch (dev_priv->no_psr_reason) {
1580 case PSR_NO_SOURCE:
1581 seq_puts(m, "not supported on this platform");
1582 break;
1583 case PSR_NO_SINK:
1584 seq_puts(m, "not supported by panel");
1585 break;
Rodrigo Vivi105b7c12013-07-11 18:45:02 -03001586 case PSR_MODULE_PARAM:
1587 seq_puts(m, "disabled by flag");
1588 break;
Rodrigo Vivi3f51e472013-07-11 18:45:00 -03001589 case PSR_CRTC_NOT_ACTIVE:
1590 seq_puts(m, "crtc not active");
1591 break;
1592 case PSR_PWR_WELL_ENABLED:
1593 seq_puts(m, "power well enabled");
1594 break;
1595 case PSR_NOT_TILED:
1596 seq_puts(m, "not tiled");
1597 break;
1598 case PSR_SPRITE_ENABLED:
1599 seq_puts(m, "sprite enabled");
1600 break;
1601 case PSR_S3D_ENABLED:
1602 seq_puts(m, "stereo 3d enabled");
1603 break;
1604 case PSR_INTERLACED_ENABLED:
1605 seq_puts(m, "interlaced enabled");
1606 break;
1607 case PSR_HSW_NOT_DDIA:
1608 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1609 break;
1610 default:
1611 seq_puts(m, "unknown reason");
1612 }
1613 seq_puts(m, "\n");
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03001614 return 0;
1615 }
1616
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03001617 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1618
1619 seq_puts(m, "PSR Current State: ");
1620 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1621 case EDP_PSR_STATUS_STATE_IDLE:
1622 seq_puts(m, "Reset state\n");
1623 break;
1624 case EDP_PSR_STATUS_STATE_SRDONACK:
1625 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1626 break;
1627 case EDP_PSR_STATUS_STATE_SRDENT:
1628 seq_puts(m, "SRD entry\n");
1629 break;
1630 case EDP_PSR_STATUS_STATE_BUFOFF:
1631 seq_puts(m, "Wait for buffer turn off\n");
1632 break;
1633 case EDP_PSR_STATUS_STATE_BUFON:
1634 seq_puts(m, "Wait for buffer turn on\n");
1635 break;
1636 case EDP_PSR_STATUS_STATE_AUXACK:
1637 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1638 break;
1639 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1640 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1641 break;
1642 default:
1643 seq_puts(m, "Unknown\n");
1644 break;
1645 }
1646
1647 seq_puts(m, "Link Status: ");
1648 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1649 case EDP_PSR_STATUS_LINK_FULL_OFF:
1650 seq_puts(m, "Link is fully off\n");
1651 break;
1652 case EDP_PSR_STATUS_LINK_FULL_ON:
1653 seq_puts(m, "Link is fully on\n");
1654 break;
1655 case EDP_PSR_STATUS_LINK_STANDBY:
1656 seq_puts(m, "Link is in standby\n");
1657 break;
1658 default:
1659 seq_puts(m, "Unknown\n");
1660 break;
1661 }
1662
1663 seq_printf(m, "PSR Entry Count: %u\n",
1664 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1665 EDP_PSR_STATUS_COUNT_MASK);
1666
1667 seq_printf(m, "Max Sleep Timer Counter: %u\n",
1668 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1669 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1670
1671 seq_printf(m, "Had AUX error: %s\n",
1672 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1673
1674 seq_printf(m, "Sending AUX: %s\n",
1675 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1676
1677 seq_printf(m, "Sending Idle: %s\n",
1678 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
1679
1680 seq_printf(m, "Sending TP2 TP3: %s\n",
1681 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1682
1683 seq_printf(m, "Sending TP1: %s\n",
1684 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1685
1686 seq_printf(m, "Idle Count: %u\n",
1687 psrstat & EDP_PSR_STATUS_IDLE_MASK);
1688
1689 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1690 seq_printf(m, "Performance Counter: %u\n", psrperf);
1691
1692 return 0;
1693}
1694
Kees Cook647416f2013-03-10 14:10:06 -07001695static int
1696i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001697{
Kees Cook647416f2013-03-10 14:10:06 -07001698 struct drm_device *dev = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001699 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001700
Kees Cook647416f2013-03-10 14:10:06 -07001701 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001702
Kees Cook647416f2013-03-10 14:10:06 -07001703 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001704}
1705
Kees Cook647416f2013-03-10 14:10:06 -07001706static int
1707i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001708{
Kees Cook647416f2013-03-10 14:10:06 -07001709 struct drm_device *dev = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001710
Kees Cook647416f2013-03-10 14:10:06 -07001711 DRM_INFO("Manually setting wedged to %llu\n", val);
Chris Wilson527f9e92010-11-11 01:16:58 +00001712 i915_handle_error(dev, val);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001713
Kees Cook647416f2013-03-10 14:10:06 -07001714 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001715}
1716
Kees Cook647416f2013-03-10 14:10:06 -07001717DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1718 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03001719 "%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01001720
Kees Cook647416f2013-03-10 14:10:06 -07001721static int
1722i915_ring_stop_get(void *data, u64 *val)
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001723{
Kees Cook647416f2013-03-10 14:10:06 -07001724 struct drm_device *dev = data;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001725 drm_i915_private_t *dev_priv = dev->dev_private;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001726
Kees Cook647416f2013-03-10 14:10:06 -07001727 *val = dev_priv->gpu_error.stop_rings;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001728
Kees Cook647416f2013-03-10 14:10:06 -07001729 return 0;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001730}
1731
Kees Cook647416f2013-03-10 14:10:06 -07001732static int
1733i915_ring_stop_set(void *data, u64 val)
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001734{
Kees Cook647416f2013-03-10 14:10:06 -07001735 struct drm_device *dev = data;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001736 struct drm_i915_private *dev_priv = dev->dev_private;
Kees Cook647416f2013-03-10 14:10:06 -07001737 int ret;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001738
Kees Cook647416f2013-03-10 14:10:06 -07001739 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001740
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001741 ret = mutex_lock_interruptible(&dev->struct_mutex);
1742 if (ret)
1743 return ret;
1744
Daniel Vetter99584db2012-11-14 17:14:04 +01001745 dev_priv->gpu_error.stop_rings = val;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001746 mutex_unlock(&dev->struct_mutex);
1747
Kees Cook647416f2013-03-10 14:10:06 -07001748 return 0;
Daniel Vettere5eb3d62012-05-03 14:48:16 +02001749}
1750
Kees Cook647416f2013-03-10 14:10:06 -07001751DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1752 i915_ring_stop_get, i915_ring_stop_set,
1753 "0x%08llx\n");
Daniel Vetterd5442302012-04-27 15:17:40 +02001754
Chris Wilsondd624af2013-01-15 12:39:35 +00001755#define DROP_UNBOUND 0x1
1756#define DROP_BOUND 0x2
1757#define DROP_RETIRE 0x4
1758#define DROP_ACTIVE 0x8
1759#define DROP_ALL (DROP_UNBOUND | \
1760 DROP_BOUND | \
1761 DROP_RETIRE | \
1762 DROP_ACTIVE)
Kees Cook647416f2013-03-10 14:10:06 -07001763static int
1764i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00001765{
Kees Cook647416f2013-03-10 14:10:06 -07001766 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00001767
Kees Cook647416f2013-03-10 14:10:06 -07001768 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00001769}
1770
Kees Cook647416f2013-03-10 14:10:06 -07001771static int
1772i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00001773{
Kees Cook647416f2013-03-10 14:10:06 -07001774 struct drm_device *dev = data;
Chris Wilsondd624af2013-01-15 12:39:35 +00001775 struct drm_i915_private *dev_priv = dev->dev_private;
1776 struct drm_i915_gem_object *obj, *next;
Ben Widawsky5cef07e2013-07-16 16:50:08 -07001777 struct i915_address_space *vm = &dev_priv->gtt.base;
Kees Cook647416f2013-03-10 14:10:06 -07001778 int ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00001779
Kees Cook647416f2013-03-10 14:10:06 -07001780 DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
Chris Wilsondd624af2013-01-15 12:39:35 +00001781
1782 /* No need to check and wait for gpu resets, only libdrm auto-restarts
1783 * on ioctls on -EAGAIN. */
1784 ret = mutex_lock_interruptible(&dev->struct_mutex);
1785 if (ret)
1786 return ret;
1787
1788 if (val & DROP_ACTIVE) {
1789 ret = i915_gpu_idle(dev);
1790 if (ret)
1791 goto unlock;
1792 }
1793
1794 if (val & (DROP_RETIRE | DROP_ACTIVE))
1795 i915_gem_retire_requests(dev);
1796
1797 if (val & DROP_BOUND) {
Ben Widawsky5cef07e2013-07-16 16:50:08 -07001798 list_for_each_entry_safe(obj, next, &vm->inactive_list,
Ben Widawsky31a46c92013-07-31 16:59:55 -07001799 mm_list) {
1800 if (obj->pin_count)
1801 continue;
1802
1803 ret = i915_gem_object_unbind(obj);
1804 if (ret)
1805 goto unlock;
1806 }
Chris Wilsondd624af2013-01-15 12:39:35 +00001807 }
1808
1809 if (val & DROP_UNBOUND) {
Ben Widawsky35c20a62013-05-31 11:28:48 -07001810 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1811 global_list)
Chris Wilsondd624af2013-01-15 12:39:35 +00001812 if (obj->pages_pin_count == 0) {
1813 ret = i915_gem_object_put_pages(obj);
1814 if (ret)
1815 goto unlock;
1816 }
1817 }
1818
1819unlock:
1820 mutex_unlock(&dev->struct_mutex);
1821
Kees Cook647416f2013-03-10 14:10:06 -07001822 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00001823}
1824
Kees Cook647416f2013-03-10 14:10:06 -07001825DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
1826 i915_drop_caches_get, i915_drop_caches_set,
1827 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00001828
Kees Cook647416f2013-03-10 14:10:06 -07001829static int
1830i915_max_freq_get(void *data, u64 *val)
Jesse Barnes358733e2011-07-27 11:53:01 -07001831{
Kees Cook647416f2013-03-10 14:10:06 -07001832 struct drm_device *dev = data;
Jesse Barnes358733e2011-07-27 11:53:01 -07001833 drm_i915_private_t *dev_priv = dev->dev_private;
Kees Cook647416f2013-03-10 14:10:06 -07001834 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02001835
1836 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1837 return -ENODEV;
1838
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001839 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02001840 if (ret)
1841 return ret;
Jesse Barnes358733e2011-07-27 11:53:01 -07001842
Jesse Barnes0a073b82013-04-17 15:54:58 -07001843 if (IS_VALLEYVIEW(dev))
1844 *val = vlv_gpu_freq(dev_priv->mem_freq,
1845 dev_priv->rps.max_delay);
1846 else
1847 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001848 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes358733e2011-07-27 11:53:01 -07001849
Kees Cook647416f2013-03-10 14:10:06 -07001850 return 0;
Jesse Barnes358733e2011-07-27 11:53:01 -07001851}
1852
Kees Cook647416f2013-03-10 14:10:06 -07001853static int
1854i915_max_freq_set(void *data, u64 val)
Jesse Barnes358733e2011-07-27 11:53:01 -07001855{
Kees Cook647416f2013-03-10 14:10:06 -07001856 struct drm_device *dev = data;
Jesse Barnes358733e2011-07-27 11:53:01 -07001857 struct drm_i915_private *dev_priv = dev->dev_private;
Kees Cook647416f2013-03-10 14:10:06 -07001858 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02001859
1860 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1861 return -ENODEV;
Jesse Barnes358733e2011-07-27 11:53:01 -07001862
Kees Cook647416f2013-03-10 14:10:06 -07001863 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
Jesse Barnes358733e2011-07-27 11:53:01 -07001864
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001865 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02001866 if (ret)
1867 return ret;
1868
Jesse Barnes358733e2011-07-27 11:53:01 -07001869 /*
1870 * Turbo will still be enabled, but won't go above the set value.
1871 */
Jesse Barnes0a073b82013-04-17 15:54:58 -07001872 if (IS_VALLEYVIEW(dev)) {
1873 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1874 dev_priv->rps.max_delay = val;
1875 gen6_set_rps(dev, val);
1876 } else {
1877 do_div(val, GT_FREQUENCY_MULTIPLIER);
1878 dev_priv->rps.max_delay = val;
1879 gen6_set_rps(dev, val);
1880 }
1881
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001882 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes358733e2011-07-27 11:53:01 -07001883
Kees Cook647416f2013-03-10 14:10:06 -07001884 return 0;
Jesse Barnes358733e2011-07-27 11:53:01 -07001885}
1886
Kees Cook647416f2013-03-10 14:10:06 -07001887DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
1888 i915_max_freq_get, i915_max_freq_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03001889 "%llu\n");
Jesse Barnes358733e2011-07-27 11:53:01 -07001890
Kees Cook647416f2013-03-10 14:10:06 -07001891static int
1892i915_min_freq_get(void *data, u64 *val)
Jesse Barnes1523c312012-05-25 12:34:54 -07001893{
Kees Cook647416f2013-03-10 14:10:06 -07001894 struct drm_device *dev = data;
Jesse Barnes1523c312012-05-25 12:34:54 -07001895 drm_i915_private_t *dev_priv = dev->dev_private;
Kees Cook647416f2013-03-10 14:10:06 -07001896 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02001897
1898 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1899 return -ENODEV;
1900
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001901 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02001902 if (ret)
1903 return ret;
Jesse Barnes1523c312012-05-25 12:34:54 -07001904
Jesse Barnes0a073b82013-04-17 15:54:58 -07001905 if (IS_VALLEYVIEW(dev))
1906 *val = vlv_gpu_freq(dev_priv->mem_freq,
1907 dev_priv->rps.min_delay);
1908 else
1909 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001910 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes1523c312012-05-25 12:34:54 -07001911
Kees Cook647416f2013-03-10 14:10:06 -07001912 return 0;
Jesse Barnes1523c312012-05-25 12:34:54 -07001913}
1914
Kees Cook647416f2013-03-10 14:10:06 -07001915static int
1916i915_min_freq_set(void *data, u64 val)
Jesse Barnes1523c312012-05-25 12:34:54 -07001917{
Kees Cook647416f2013-03-10 14:10:06 -07001918 struct drm_device *dev = data;
Jesse Barnes1523c312012-05-25 12:34:54 -07001919 struct drm_i915_private *dev_priv = dev->dev_private;
Kees Cook647416f2013-03-10 14:10:06 -07001920 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02001921
1922 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1923 return -ENODEV;
Jesse Barnes1523c312012-05-25 12:34:54 -07001924
Kees Cook647416f2013-03-10 14:10:06 -07001925 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
Jesse Barnes1523c312012-05-25 12:34:54 -07001926
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001927 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02001928 if (ret)
1929 return ret;
1930
Jesse Barnes1523c312012-05-25 12:34:54 -07001931 /*
1932 * Turbo will still be enabled, but won't go below the set value.
1933 */
Jesse Barnes0a073b82013-04-17 15:54:58 -07001934 if (IS_VALLEYVIEW(dev)) {
1935 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1936 dev_priv->rps.min_delay = val;
1937 valleyview_set_rps(dev, val);
1938 } else {
1939 do_div(val, GT_FREQUENCY_MULTIPLIER);
1940 dev_priv->rps.min_delay = val;
1941 gen6_set_rps(dev, val);
1942 }
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001943 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes1523c312012-05-25 12:34:54 -07001944
Kees Cook647416f2013-03-10 14:10:06 -07001945 return 0;
Jesse Barnes1523c312012-05-25 12:34:54 -07001946}
1947
Kees Cook647416f2013-03-10 14:10:06 -07001948DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
1949 i915_min_freq_get, i915_min_freq_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03001950 "%llu\n");
Jesse Barnes1523c312012-05-25 12:34:54 -07001951
Kees Cook647416f2013-03-10 14:10:06 -07001952static int
1953i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001954{
Kees Cook647416f2013-03-10 14:10:06 -07001955 struct drm_device *dev = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001956 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001957 u32 snpcr;
Kees Cook647416f2013-03-10 14:10:06 -07001958 int ret;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001959
Daniel Vetter004777c2012-08-09 15:07:01 +02001960 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1961 return -ENODEV;
1962
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001963 ret = mutex_lock_interruptible(&dev->struct_mutex);
1964 if (ret)
1965 return ret;
1966
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001967 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1968 mutex_unlock(&dev_priv->dev->struct_mutex);
1969
Kees Cook647416f2013-03-10 14:10:06 -07001970 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001971
Kees Cook647416f2013-03-10 14:10:06 -07001972 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001973}
1974
Kees Cook647416f2013-03-10 14:10:06 -07001975static int
1976i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001977{
Kees Cook647416f2013-03-10 14:10:06 -07001978 struct drm_device *dev = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001979 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001980 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001981
Daniel Vetter004777c2012-08-09 15:07:01 +02001982 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1983 return -ENODEV;
1984
Kees Cook647416f2013-03-10 14:10:06 -07001985 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001986 return -EINVAL;
1987
Kees Cook647416f2013-03-10 14:10:06 -07001988 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001989
1990 /* Update the cache sharing policy here as well */
1991 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1992 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1993 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1994 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1995
Kees Cook647416f2013-03-10 14:10:06 -07001996 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07001997}
1998
Kees Cook647416f2013-03-10 14:10:06 -07001999DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2000 i915_cache_sharing_get, i915_cache_sharing_set,
2001 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07002002
Chris Wilsonf3cd4742009-10-13 22:20:20 +01002003/* As the drm_debugfs_init() routines are called before dev->dev_private is
2004 * allocated we need to hook into the minor for release. */
2005static int
2006drm_add_fake_info_node(struct drm_minor *minor,
2007 struct dentry *ent,
2008 const void *key)
2009{
2010 struct drm_info_node *node;
2011
2012 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
2013 if (node == NULL) {
2014 debugfs_remove(ent);
2015 return -ENOMEM;
2016 }
2017
2018 node->minor = minor;
2019 node->dent = ent;
2020 node->info_ent = (void *) key;
Marcin Slusarzb3e067c2011-11-09 22:20:35 +01002021
2022 mutex_lock(&minor->debugfs_lock);
2023 list_add(&node->list, &minor->debugfs_list);
2024 mutex_unlock(&minor->debugfs_lock);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01002025
2026 return 0;
2027}
2028
Ben Widawsky6d794d42011-04-25 11:25:56 -07002029static int i915_forcewake_open(struct inode *inode, struct file *file)
2030{
2031 struct drm_device *dev = inode->i_private;
2032 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07002033
Daniel Vetter075edca2012-01-24 09:44:28 +01002034 if (INTEL_INFO(dev)->gen < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07002035 return 0;
2036
Ben Widawsky6d794d42011-04-25 11:25:56 -07002037 gen6_gt_force_wake_get(dev_priv);
Ben Widawsky6d794d42011-04-25 11:25:56 -07002038
2039 return 0;
2040}
2041
Ben Widawskyc43b5632012-04-16 14:07:40 -07002042static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07002043{
2044 struct drm_device *dev = inode->i_private;
2045 struct drm_i915_private *dev_priv = dev->dev_private;
2046
Daniel Vetter075edca2012-01-24 09:44:28 +01002047 if (INTEL_INFO(dev)->gen < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07002048 return 0;
2049
Ben Widawsky6d794d42011-04-25 11:25:56 -07002050 gen6_gt_force_wake_put(dev_priv);
Ben Widawsky6d794d42011-04-25 11:25:56 -07002051
2052 return 0;
2053}
2054
2055static const struct file_operations i915_forcewake_fops = {
2056 .owner = THIS_MODULE,
2057 .open = i915_forcewake_open,
2058 .release = i915_forcewake_release,
2059};
2060
2061static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2062{
2063 struct drm_device *dev = minor->dev;
2064 struct dentry *ent;
2065
2066 ent = debugfs_create_file("i915_forcewake_user",
Ben Widawsky8eb57292011-05-11 15:10:58 -07002067 S_IRUSR,
Ben Widawsky6d794d42011-04-25 11:25:56 -07002068 root, dev,
2069 &i915_forcewake_fops);
2070 if (IS_ERR(ent))
2071 return PTR_ERR(ent);
2072
Ben Widawsky8eb57292011-05-11 15:10:58 -07002073 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
Ben Widawsky6d794d42011-04-25 11:25:56 -07002074}
2075
Daniel Vetter6a9c3082011-12-14 13:57:11 +01002076static int i915_debugfs_create(struct dentry *root,
2077 struct drm_minor *minor,
2078 const char *name,
2079 const struct file_operations *fops)
Jesse Barnes358733e2011-07-27 11:53:01 -07002080{
2081 struct drm_device *dev = minor->dev;
2082 struct dentry *ent;
2083
Daniel Vetter6a9c3082011-12-14 13:57:11 +01002084 ent = debugfs_create_file(name,
Jesse Barnes358733e2011-07-27 11:53:01 -07002085 S_IRUGO | S_IWUSR,
2086 root, dev,
Daniel Vetter6a9c3082011-12-14 13:57:11 +01002087 fops);
Jesse Barnes358733e2011-07-27 11:53:01 -07002088 if (IS_ERR(ent))
2089 return PTR_ERR(ent);
2090
Daniel Vetter6a9c3082011-12-14 13:57:11 +01002091 return drm_add_fake_info_node(minor, ent, fops);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07002092}
2093
/*
 * Simple seq_file-based debugfs entries: each {name, show-callback, flags}
 * tuple (optionally with a (void *) driver_features argument forwarded to
 * the callback) is registered in bulk by drm_debugfs_create_files() from
 * i915_debugfs_init() and removed in i915_debugfs_cleanup().
 */
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	/* one hardware-status-page dump per ring; data is the ring id */
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05002132
Daniel Vetter34b96742013-07-04 20:49:44 +02002133struct i915_debugfs_files {
2134 const char *name;
2135 const struct file_operations *fops;
2136} i915_debugfs_files[] = {
2137 {"i915_wedged", &i915_wedged_fops},
2138 {"i915_max_freq", &i915_max_freq_fops},
2139 {"i915_min_freq", &i915_min_freq_fops},
2140 {"i915_cache_sharing", &i915_cache_sharing_fops},
2141 {"i915_ring_stop", &i915_ring_stop_fops},
2142 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2143 {"i915_error_state", &i915_error_state_fops},
2144 {"i915_next_seqno", &i915_next_seqno_fops},
2145};
2146
Ben Gamari27c202a2009-07-01 22:26:52 -04002147int i915_debugfs_init(struct drm_minor *minor)
Ben Gamari20172632009-02-17 20:08:50 -05002148{
Daniel Vetter34b96742013-07-04 20:49:44 +02002149 int ret, i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01002150
Ben Widawsky6d794d42011-04-25 11:25:56 -07002151 ret = i915_forcewake_create(minor->debugfs_root, minor);
2152 if (ret)
2153 return ret;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01002154
Daniel Vetter34b96742013-07-04 20:49:44 +02002155 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2156 ret = i915_debugfs_create(minor->debugfs_root, minor,
2157 i915_debugfs_files[i].name,
2158 i915_debugfs_files[i].fops);
2159 if (ret)
2160 return ret;
2161 }
Mika Kuoppala40633212012-12-04 15:12:00 +02002162
Ben Gamari27c202a2009-07-01 22:26:52 -04002163 return drm_debugfs_create_files(i915_debugfs_list,
2164 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05002165 minor->debugfs_root, minor);
2166}
2167
/*
 * i915_debugfs_cleanup - remove every debugfs entry created by
 * i915_debugfs_init() for @minor.
 */
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	/* Bulk-registered seq_file entries go first. */
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	/*
	 * The casts below are deliberate: i915_debugfs_create() registers
	 * each custom-fops file via drm_add_fake_info_node() keyed on its
	 * fops pointer, so removal presumably matches nodes by that same
	 * pointer identity — the fops pointer is cast to drm_info_list *
	 * purely as a lookup key, never dereferenced as a real info list.
	 * NOTE(review): the forcewake entry is assumed to be registered
	 * the same way inside i915_forcewake_create() — confirm there.
	 */
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}
2183
2184#endif /* CONFIG_DEBUG_FS */