/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include "intel_drv.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(*node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *)key;

        mutex_lock(&minor->debugfs_lock);
        list_add(&node->list, &minor->debugfs_list);
        mutex_unlock(&minor->debugfs_lock);

        return 0;
}

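/*
 * i915_capabilities: print the device generation, platform name and PCH
 * type, then every device-info flag via DEV_INFO_FOR_EACH_FLAG() as a
 * yes/no line.
 */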
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_device_info *info = INTEL_INFO(dev_priv);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

        return 0;
}

static char get_active_flag(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
        return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return !list_empty(&obj->userfault_link) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        u64 size = 0;
        struct i915_vma *vma;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}

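/*
 * describe_obj: emit a one-line summary of a GEM object. The five
 * single-character flags come from the helpers above: '*' active,
 * 'p' pinned for display, 'X'/'Y' tiling, 'g' on the GGTT userfault
 * list, 'M' has a kernel mapping; followed by size, read/write
 * domains, cache level and each allocated VMA.
 */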
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_display)
                seq_printf(m, " (display)");
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size);
                if (i915_vma_is_ggtt(vma))
                        seq_printf(m, ", type: %u", vma->ggtt_view.type);
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

static int obj_rank_by_stolen(void *priv,
                              struct list_head *A, struct list_head *B)
{
        struct drm_i915_gem_object *a =
                container_of(A, struct drm_i915_gem_object, obj_exec_link);
        struct drm_i915_gem_object *b =
                container_of(B, struct drm_i915_gem_object, obj_exec_link);

        if (a->stolen->start < b->stolen->start)
                return -1;
        if (a->stolen->start > b->stolen->start)
                return 1;
        return 0;
}

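/*
 * i915_gem_stolen_list_info: gather every object backed by stolen
 * memory from the bound and unbound lists, sort them by stolen offset
 * with list_sort(), and describe each one.
 */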
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
                if (obj->stolen == NULL)
                        continue;

                list_add(&obj->obj_exec_link, &stolen);

                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
                count++;
        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
                if (obj->stolen == NULL)
                        continue;

                list_add(&obj->obj_exec_link, &stolen);

                total_obj_size += obj->base.size;
                count++;
        }
        list_sort(NULL, &stolen, obj_rank_by_stolen);
        seq_puts(m, "Stolen:\n");
        while (!list_empty(&stolen)) {
                obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
                seq_puts(m, " ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                list_del_init(&obj->obj_exec_link);
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}

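/*
 * Per-client accounting. A file_stats is filled by iterating an idr of
 * objects through per_file_stats(), which buckets each object's size
 * into total/unbound/shared and, per VMA, global vs per-process GTT
 * and active vs inactive.
 */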
struct file_stats {
        struct drm_i915_file_private *file_priv;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

                        if (ppgtt->base.file != stats->file_priv)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;
        }

        return 0;
}

#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct file_stats stats;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int j;

        memset(&stats, 0, sizeof(stats));

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}

static int per_file_ctx_stats(int id, void *ptr, void *data)
{
        struct i915_gem_context *ctx = ptr;
        int n;

        for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
                if (ctx->engine[n].state)
                        per_file_stats(0, ctx->engine[n].state->obj, data);
                if (ctx->engine[n].ring)
                        per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
        }

        return 0;
}

static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct file_stats stats;
        struct drm_file *file;

        memset(&stats, 0, sizeof(stats));

        mutex_lock(&dev->struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
        mutex_unlock(&dev->struct_mutex);

        print_file_stats(m, "[k]contexts", stats);
}

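/*
 * i915_gem_object_info: the top-level memory summary. Unbound and
 * bound object totals are accumulated separately (with purgeable,
 * mapped and display-pinned sub-totals), followed by per-client stats
 * for the kernel batch pools, contexts and every open DRM file.
 */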
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count;
        u64 size, mapped_size, purgeable_size, dpy_size;
        struct drm_i915_gem_object *obj;
        struct drm_file *file;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_display) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
        }
        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u display objects (pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%llu] gtt total\n",
                   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        mutex_lock(&dev->filelist_mutex);
        print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct drm_i915_gem_request *request;
                struct task_struct *task;

                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, per_file_stats, &stats);
                spin_unlock(&file->table_lock);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                mutex_lock(&dev->struct_mutex);
                request = list_first_entry_or_null(&file_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   client_list);
                rcu_read_lock();
                task = pid_task(request && request->ctx->pid ?
                                request->ctx->pid : file->pid,
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
                mutex_unlock(&dev->struct_mutex);
        }
        mutex_unlock(&dev->filelist_mutex);

        return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        struct drm_device *dev = &dev_priv->drm;
        bool show_pin_display_only = !!node->info_ent->data;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
                if (show_pin_display_only && !obj->pin_display)
                        continue;

                seq_puts(m, " ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
                count++;
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        return 0;
}

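/*
 * i915_gem_pageflip_info: for each CRTC, report under the event lock
 * whether a flip is outstanding, the request/seqno it is queued on,
 * the vblank counts involved, and the current vs pending scanout
 * address for MMIO flips.
 */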
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_intel_crtc(dev, crtc) {
                const char pipe = pipe_name(crtc->pipe);
                const char plane = plane_name(crtc->plane);
                struct intel_flip_work *work;

                spin_lock_irq(&dev->event_lock);
                work = crtc->flip_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
                        u32 pending;
                        u32 addr;

                        pending = atomic_read(&work->pending);
                        if (pending) {
                                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                           pipe, plane);
                        }
                        if (work->flip_queued_req) {
                                struct intel_engine_cs *engine = work->flip_queued_req->engine;

                                seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
                                           engine->name,
                                           work->flip_queued_req->global_seqno,
                                           intel_engine_last_submit(engine),
                                           intel_engine_get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
                                   work->flip_queued_vblank,
                                   work->flip_ready_vblank,
                                   intel_crtc_get_vblank_counter(crtc));
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

                        if (INTEL_GEN(dev_priv) >= 4)
                                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
                        else
                                addr = I915_READ(DSPADDR(crtc->plane));
                        seq_printf(m, "Current scanout address 0x%08x\n", addr);

                        if (work->pending_flip_obj) {
                                seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
                                seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
                        }
                }
                spin_unlock_irq(&dev->event_lock);
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, " ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

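/*
 * print_request: one request per line: global seqno, context hw_id and
 * per-timeline fence seqno, scheduler priority, age in milliseconds
 * since emission, and the timeline name.
 */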
static void print_request(struct seq_file *m,
                          struct drm_i915_gem_request *rq,
                          const char *prefix)
{
        seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
                   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
                   rq->priotree.priority,
                   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
                   rq->timeline->common->name);
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_request *req;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret, any;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        any = 0;
        for_each_engine(engine, dev_priv, id) {
                int count;

                count = 0;
                list_for_each_entry(req, &engine->timeline->requests, link)
                        count++;
                if (count == 0)
                        continue;

                seq_printf(m, "%s requests: %d\n", engine->name, count);
                list_for_each_entry(req, &engine->timeline->requests, link)
                        print_request(m, req, " ");

                any++;
        }
        mutex_unlock(&dev->struct_mutex);

        if (any == 0)
                seq_puts(m, "No requests\n");

        return 0;
}

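/*
 * i915_ring_seqno_info: report the engine's current breadcrumb seqno
 * and, under the breadcrumbs lock, every task sleeping in the waiter
 * rbtree together with the seqno it waits for.
 */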
static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *rb;

        seq_printf(m, "Current sequence (%s): %x\n",
                   engine->name, intel_engine_get_seqno(engine));

        spin_lock_irq(&b->lock);
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = container_of(rb, typeof(*w), node);

                seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
                           engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
        }
        spin_unlock_irq(&b->lock);
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id)
                i915_ring_seqno_info(m, engine);

        return 0;
}

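/*
 * i915_interrupt_info: dump the interrupt registers, with the register
 * layout selected by platform (Cherryview, gen8+, Valleyview,
 * pre-PCH-split, PCH-split). Pipe registers are read only after
 * grabbing the pipe's power well so a powered-down pipe is skipped
 * rather than faulting; a runtime-pm reference is held throughout.
 */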
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, pipe;

        intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain);
                }

                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }
                        seq_printf(m, "Pipe %c IMR:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                        seq_printf(m, "Pipe %c IIR:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                        seq_printf(m, "Pipe %c IER:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(GEN8_DE_PIPE_IER(pipe)));

                        intel_display_power_put(dev_priv, power_domain);
                }

                seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IMR));
                seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IIR));
                seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                           I915_READ(GEN8_DE_PORT_IER));

                seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IMR));
                seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IIR));
                seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                           I915_READ(GEN8_DE_MISC_IER));

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }
        for_each_engine(engine, dev_priv, id) {
                if (INTEL_GEN(dev_priv) >= 6) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s): %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
                i915_ring_seqno_info(m, engine);
        }
        intel_runtime_pm_put(dev_priv);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct i915_vma *vma = dev_priv->fence_regs[i].vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

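/*
 * Error-state file operations: a read streams the captured GPU error
 * state through i915_error_state_to_str(); any write discards it via
 * i915_destroy_error_state() so the next hang can be captured. Only
 * built when CONFIG_DRM_I915_CAPTURE_ERROR is enabled.
 */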
static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_error_state_file_priv *error_priv = filp->private_data;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_destroy_error_state(error_priv->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *dev_priv = inode->i_private;
        struct i915_error_state_file_priv *error_priv;

        error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
        if (!error_priv)
                return -ENOMEM;

        error_priv->i915 = dev_priv;

        i915_error_state_get(&dev_priv->drm, error_priv);

        file->private_data = error_priv;

        return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
        struct i915_error_state_file_priv *error_priv = file->private_data;

        i915_error_state_put(error_priv);
        kfree(error_priv);

        return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
                                     size_t count, loff_t *pos)
{
        struct i915_error_state_file_priv *error_priv = file->private_data;
        struct drm_i915_error_state_buf error_str;
        loff_t tmp_pos = 0;
        ssize_t ret_count = 0;
        int ret;

        ret = i915_error_state_buf_init(&error_str, error_priv->i915,
                                        count, *pos);
        if (ret)
                return ret;

        ret = i915_error_state_to_str(&error_str, error_priv);
        if (ret)
                goto out;

        ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
                                            error_str.buf,
                                            error_str.bytes);

        if (ret_count < 0)
                ret = ret_count;
        else
                *pos = error_str.start + ret_count;
out:
        i915_error_state_buf_release(&error_str);
        return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = i915_error_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = i915_error_state_release,
};

#endif

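/*
 * i915_next_seqno: simple-attribute get/set pair. The getter reports
 * one past the current global timeline seqno; the setter passes the
 * value to i915_gem_set_global_seqno() under struct_mutex.
 */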
static int
i915_next_seqno_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;

        *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
        return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ret = i915_gem_set_global_seqno(dev, val);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        i915_next_seqno_get, i915_next_seqno_set,
                        "0x%llx\n");

static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int ret = 0;

        intel_runtime_pm_get(dev_priv);

        if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq_sts;

                mutex_lock(&dev_priv->rps.hw_lock);
                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
                        goto out;

                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (IS_GEN9(dev_priv))
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                if (IS_GEN9(dev_priv))
                        cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
                else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                cagf = intel_gpu_freq(dev_priv, cagf);

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);

                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                } else {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                }
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
                seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           dev_priv->rps.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           dev_priv->rps.down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001207 GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001208 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001209 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001210
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001211 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001212 rp_state_cap >> 0) & 0xff;
David Weinehall36cdd012016-08-22 13:59:31 +03001213 max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001214 GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001215 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001216 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001217 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001218 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001219
Chris Wilsond86ed342015-04-27 13:41:19 +01001220 seq_printf(m, "Current freq: %d MHz\n",
1221 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1222 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001223 seq_printf(m, "Idle freq: %d MHz\n",
1224 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001225 seq_printf(m, "Min freq: %d MHz\n",
1226 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001227 seq_printf(m, "Boost freq: %d MHz\n",
1228 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001229 seq_printf(m, "Max freq: %d MHz\n",
1230 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1231 seq_printf(m,
1232 "efficient (RPe) frequency: %d MHz\n",
1233 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001234 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001235 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001236 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001237
Mika Kahola1170f282015-09-25 14:00:32 +03001238 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1239 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1240 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1241
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001242out:
1243 intel_runtime_pm_put(dev_priv);
1244 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001245}
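
/*
 * Illustrative sketch, not part of the driver: the three RP0/RP1/RPn
 * decodes above differ only in which byte of RP_STATE_CAP they select
 * and in whether the GEN9 scaler applies, so they could share a single
 * helper. The helper name below is hypothetical.
 */
static inline int rp_state_cap_to_mhz(struct drm_i915_private *dev_priv,
				      u32 rp_state_cap, int shift)
{
	int freq = (rp_state_cap >> shift) & 0xff;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		freq *= GEN9_FREQ_SCALER;

	return intel_gpu_freq(dev_priv, freq);	/* ratio -> MHz */
}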
1246
Ben Widawskyd6369512016-09-20 16:54:32 +03001247static void i915_instdone_info(struct drm_i915_private *dev_priv,
1248 struct seq_file *m,
1249 struct intel_instdone *instdone)
1250{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001251 int slice;
1252 int subslice;
1253
Ben Widawskyd6369512016-09-20 16:54:32 +03001254 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1255 instdone->instdone);
1256
1257 if (INTEL_GEN(dev_priv) <= 3)
1258 return;
1259
1260 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1261 instdone->slice_common);
1262
1263 if (INTEL_GEN(dev_priv) <= 6)
1264 return;
1265
Ben Widawskyf9e61372016-09-20 16:54:33 +03001266 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1267 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1268 slice, subslice, instdone->sampler[slice][subslice]);
1269
1270 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1271 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1272 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001273}
1274
Chris Wilsonf6544492015-01-26 18:03:04 +02001275static int i915_hangcheck_info(struct seq_file *m, void *unused)
1276{
David Weinehall36cdd012016-08-22 13:59:31 +03001277 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001278 struct intel_engine_cs *engine;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001279 u64 acthd[I915_NUM_ENGINES];
1280 u32 seqno[I915_NUM_ENGINES];
Ben Widawskyd6369512016-09-20 16:54:32 +03001281 struct intel_instdone instdone;
Dave Gordonc3232b12016-03-23 18:19:53 +00001282 enum intel_engine_id id;
Chris Wilsonf6544492015-01-26 18:03:04 +02001283
Chris Wilson8af29b02016-09-09 14:11:47 +01001284 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1285 seq_printf(m, "Wedged\n");
1286 if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
1287 seq_printf(m, "Reset in progress\n");
1288 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1289 seq_printf(m, "Waiter holding struct mutex\n");
1290 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1291 seq_printf(m, "struct_mutex blocked for reset\n");
1292
Chris Wilsonf6544492015-01-26 18:03:04 +02001293 if (!i915.enable_hangcheck) {
1294 seq_printf(m, "Hangcheck disabled\n");
1295 return 0;
1296 }
1297
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001298 intel_runtime_pm_get(dev_priv);
1299
Akash Goel3b3f1652016-10-13 22:44:48 +05301300 for_each_engine(engine, dev_priv, id) {
Chris Wilson7e37f882016-08-02 22:50:21 +01001301 acthd[id] = intel_engine_get_active_head(engine);
Chris Wilson1b7744e2016-07-01 17:23:17 +01001302 seqno[id] = intel_engine_get_seqno(engine);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001303 }
1304
Akash Goel3b3f1652016-10-13 22:44:48 +05301305 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001306
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001307 intel_runtime_pm_put(dev_priv);
1308
Chris Wilsonf6544492015-01-26 18:03:04 +02001309 if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1310 seq_printf(m, "Hangcheck active, fires in %dms\n",
1311 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1312 jiffies));
1313 } else
1314 seq_printf(m, "Hangcheck inactive\n");
1315
Akash Goel3b3f1652016-10-13 22:44:48 +05301316 for_each_engine(engine, dev_priv, id) {
Chris Wilson33f53712016-10-04 21:11:32 +01001317 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1318 struct rb_node *rb;
1319
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001320 seq_printf(m, "%s:\n", engine->name);
Chris Wilson14fd0d62016-04-07 07:29:10 +01001321 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
Chris Wilsoncb399ea2016-11-01 10:03:16 +00001322 engine->hangcheck.seqno, seqno[id],
1323 intel_engine_last_submit(engine));
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001324 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
Chris Wilson83348ba2016-08-09 17:47:51 +01001325 yesno(intel_engine_has_waiter(engine)),
1326 yesno(test_bit(engine->id,
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001327 &dev_priv->gpu_error.missed_irq_rings)),
1328 yesno(engine->hangcheck.stalled));
1329
Chris Wilsonf6168e32016-10-28 13:58:55 +01001330 spin_lock_irq(&b->lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001331 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1332 struct intel_wait *w = container_of(rb, typeof(*w), node);
1333
1334 seq_printf(m, "\t%s [%d] waiting for %x\n",
1335 w->tsk->comm, w->tsk->pid, w->seqno);
1336 }
Chris Wilsonf6168e32016-10-28 13:58:55 +01001337 spin_unlock_irq(&b->lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001338
Chris Wilsonf6544492015-01-26 18:03:04 +02001339 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001340 (long long)engine->hangcheck.acthd,
Dave Gordonc3232b12016-03-23 18:19:53 +00001341 (long long)acthd[id]);
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001342 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1343 hangcheck_action_to_str(engine->hangcheck.action),
1344 engine->hangcheck.action,
1345 jiffies_to_msecs(jiffies -
1346 engine->hangcheck.action_timestamp));
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001347
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001348 if (engine->id == RCS) {
Ben Widawskyd6369512016-09-20 16:54:32 +03001349 seq_puts(m, "\tinstdone read =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001350
Ben Widawskyd6369512016-09-20 16:54:32 +03001351 i915_instdone_info(dev_priv, m, &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001352
Ben Widawskyd6369512016-09-20 16:54:32 +03001353 seq_puts(m, "\tinstdone accu =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001354
Ben Widawskyd6369512016-09-20 16:54:32 +03001355 i915_instdone_info(dev_priv, m,
1356 &engine->hangcheck.instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001357 }
Chris Wilsonf6544492015-01-26 18:03:04 +02001358 }
1359
1360 return 0;
1361}
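
/*
 * Illustrative sketch: seq_file show() callbacks such as
 * i915_hangcheck_info() are normally exposed by listing them in a
 * drm_info_list table and registering the table once. The table and
 * the call below are an example, not the driver's real registration.
 */
static const struct drm_info_list i915_example_debugfs_list[] = {
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
};
/*
 * drm_debugfs_create_files(i915_example_debugfs_list,
 *			    ARRAY_SIZE(i915_example_debugfs_list),
 *			    minor->debugfs_root, minor);
 */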
1362
Ben Widawsky4d855292011-12-12 19:34:16 -08001363static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001364{
David Weinehall36cdd012016-08-22 13:59:31 +03001365 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001366 u32 rgvmodectl, rstdbyctl;
1367 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001368
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001369 intel_runtime_pm_get(dev_priv);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001370
1371 rgvmodectl = I915_READ(MEMMODECTL);
1372 rstdbyctl = I915_READ(RSTDBYCTL);
1373 crstandvid = I915_READ16(CRSTANDVID);
1374
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001375 intel_runtime_pm_put(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001376
Jani Nikula742f4912015-09-03 11:16:09 +03001377 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001378 seq_printf(m, "Boost freq: %d\n",
1379 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1380 MEMMODE_BOOST_FREQ_SHIFT);
1381 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001382 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001383 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001384 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001385 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001386 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001387 seq_printf(m, "Starting frequency: P%d\n",
1388 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001389 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001390 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001391 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1392 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1393 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1394 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001395 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001396 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001397 switch (rstdbyctl & RSX_STATUS_MASK) {
1398 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001399 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001400 break;
1401 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001402 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001403 break;
1404 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001405 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001406 break;
1407 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001408 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001409 break;
1410 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001411 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001412 break;
1413 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001414 seq_puts(m, "RS3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001415 break;
1416 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001417 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001418 break;
1419 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001420
1421 return 0;
1422}
1423
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001424static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001425{
David Weinehall36cdd012016-08-22 13:59:31 +03001426 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001427 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001428
1429 spin_lock_irq(&dev_priv->uncore.lock);
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001430 for_each_fw_domain(fw_domain, dev_priv) {
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001431 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001432 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001433 fw_domain->wake_count);
1434 }
1435 spin_unlock_irq(&dev_priv->uncore.lock);
1436
1437 return 0;
1438}
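
/*
 * Illustrative sketch: registers behind the GT power well must be read
 * with a forcewake reference held, the same bracketing the frequency
 * dump above uses. A minimal helper, assuming a parameter named
 * dev_priv as the I915_READ() macro expects; the helper name is
 * hypothetical:
 */
static inline u32 read_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	u32 val;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	val = I915_READ(reg);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return val;
}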
1439
Deepak S669ab5a2014-01-10 15:18:26 +05301440static int vlv_drpc_info(struct seq_file *m)
1441{
David Weinehall36cdd012016-08-22 13:59:31 +03001442 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001443 u32 rpmodectl1, rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301444
Imre Deakd46c0512014-04-14 20:24:27 +03001445 intel_runtime_pm_get(dev_priv);
1446
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001447 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301448 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1449 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1450
Imre Deakd46c0512014-04-14 20:24:27 +03001451 intel_runtime_pm_put(dev_priv);
1452
Deepak S669ab5a2014-01-10 15:18:26 +05301453 seq_printf(m, "Video Turbo Mode: %s\n",
1454 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1455 seq_printf(m, "Turbo enabled: %s\n",
1456 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1457 seq_printf(m, "HW control enabled: %s\n",
1458 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1459 seq_printf(m, "SW control enabled: %s\n",
1460 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1461 GEN6_RP_MEDIA_SW_MODE));
1462 seq_printf(m, "RC6 Enabled: %s\n",
1463 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1464 GEN6_RC_CTL_EI_MODE(1))));
1465 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001466 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301467 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001468 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301469
Imre Deak9cc19be2014-04-14 20:24:24 +03001470 seq_printf(m, "Render RC6 residency since boot: %u\n",
1471 I915_READ(VLV_GT_RENDER_RC6));
1472 seq_printf(m, "Media RC6 residency since boot: %u\n",
1473 I915_READ(VLV_GT_MEDIA_RC6));
1474
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001475 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301476}
1477
Ben Widawsky4d855292011-12-12 19:34:16 -08001478static int gen6_drpc_info(struct seq_file *m)
1479{
David Weinehall36cdd012016-08-22 13:59:31 +03001480 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1481 struct drm_device *dev = &dev_priv->drm;
Ben Widawskyecd8fae2012-09-26 10:34:02 -07001482 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301483 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Daniel Vetter93b525d2012-01-25 13:52:43 +01001484 unsigned forcewake_count;
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001485 int count = 0, ret;
Ben Widawsky4d855292011-12-12 19:34:16 -08001486
1487 ret = mutex_lock_interruptible(&dev->struct_mutex);
1488 if (ret)
1489 return ret;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001490 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001491
Chris Wilson907b28c2013-07-19 20:36:52 +01001492 spin_lock_irq(&dev_priv->uncore.lock);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001493 forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
Chris Wilson907b28c2013-07-19 20:36:52 +01001494 spin_unlock_irq(&dev_priv->uncore.lock);
Daniel Vetter93b525d2012-01-25 13:52:43 +01001495
1496 if (forcewake_count) {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001497 seq_puts(m, "RC information inaccurate because somebody "
 1498 "holds a forcewake reference\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001499 } else {
1500 /* NB: we cannot use forcewake, else we read the wrong values */
1501 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1502 udelay(10);
1503 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1504 }
1505
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001506 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001507 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001508
1509 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1510 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001511 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301512 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1513 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1514 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001515 mutex_unlock(&dev->struct_mutex);
Ben Widawsky44cbd332012-11-06 14:36:36 +00001516 mutex_lock(&dev_priv->rps.hw_lock);
1517 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1518 mutex_unlock(&dev_priv->rps.hw_lock);
Ben Widawsky4d855292011-12-12 19:34:16 -08001519
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001520 intel_runtime_pm_put(dev_priv);
1521
Ben Widawsky4d855292011-12-12 19:34:16 -08001522 seq_printf(m, "Video Turbo Mode: %s\n",
1523 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1524 seq_printf(m, "HW control enabled: %s\n",
1525 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1526 seq_printf(m, "SW control enabled: %s\n",
1527 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1528 GEN6_RP_MEDIA_SW_MODE));
Eric Anholtfff24e22012-01-23 16:14:05 -08001529 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001530 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1531 seq_printf(m, "RC6 Enabled: %s\n",
1532 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001533 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301534 seq_printf(m, "Render Well Gating Enabled: %s\n",
1535 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1536 seq_printf(m, "Media Well Gating Enabled: %s\n",
1537 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1538 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001539 seq_printf(m, "Deep RC6 Enabled: %s\n",
1540 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1541 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1542 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001543 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001544 switch (gt_core_status & GEN6_RCn_MASK) {
1545 case GEN6_RC0:
1546 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001547 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001548 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001549 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001550 break;
1551 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001552 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001553 break;
1554 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001555 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001556 break;
1557 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001558 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001559 break;
1560 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001561 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001562 break;
1563 }
1564
1565 seq_printf(m, "Core Power Down: %s\n",
1566 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001567 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301568 seq_printf(m, "Render Power Well: %s\n",
1569 (gen9_powergate_status &
1570 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1571 seq_printf(m, "Media Power Well: %s\n",
1572 (gen9_powergate_status &
1573 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1574 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001575
1576 /* Not exactly sure what this is */
1577 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1578 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1579 seq_printf(m, "RC6 residency since boot: %u\n",
1580 I915_READ(GEN6_GT_GFX_RC6));
1581 seq_printf(m, "RC6+ residency since boot: %u\n",
1582 I915_READ(GEN6_GT_GFX_RC6p));
1583 seq_printf(m, "RC6++ residency since boot: %u\n",
1584 I915_READ(GEN6_GT_GFX_RC6pp));
1585
Ben Widawskyecd8fae2012-09-26 10:34:02 -07001586 seq_printf(m, "RC6 voltage: %dmV\n",
1587 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1588 seq_printf(m, "RC6+ voltage: %dmV\n",
1589 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1590 seq_printf(m, "RC6++ voltage: %dmV\n",
1591 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
Akash Goelf2dd7572016-06-27 20:10:01 +05301592 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001593}
1594
1595static int i915_drpc_info(struct seq_file *m, void *unused)
1596{
David Weinehall36cdd012016-08-22 13:59:31 +03001597 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky4d855292011-12-12 19:34:16 -08001598
David Weinehall36cdd012016-08-22 13:59:31 +03001599 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Deepak S669ab5a2014-01-10 15:18:26 +05301600 return vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001601 else if (INTEL_GEN(dev_priv) >= 6)
Ben Widawsky4d855292011-12-12 19:34:16 -08001602 return gen6_drpc_info(m);
1603 else
1604 return ironlake_drpc_info(m);
1605}
1606
Daniel Vetter9a851782015-06-18 10:30:22 +02001607static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1608{
David Weinehall36cdd012016-08-22 13:59:31 +03001609 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001610
1611 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1612 dev_priv->fb_tracking.busy_bits);
1613
1614 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1615 dev_priv->fb_tracking.flip_bits);
1616
1617 return 0;
1618}
1619
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001620static int i915_fbc_status(struct seq_file *m, void *unused)
1621{
David Weinehall36cdd012016-08-22 13:59:31 +03001622 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001623
David Weinehall36cdd012016-08-22 13:59:31 +03001624 if (!HAS_FBC(dev_priv)) {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001625 seq_puts(m, "FBC unsupported on this chipset\n");
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001626 return 0;
1627 }
1628
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001629 intel_runtime_pm_get(dev_priv);
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001630 mutex_lock(&dev_priv->fbc.lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001631
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001632 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001633 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001634 else
1635 seq_printf(m, "FBC disabled: %s\n",
Paulo Zanonibf6189c2015-10-27 14:50:03 -02001636 dev_priv->fbc.no_fbc_reason);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001637
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001638 if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
1639 uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
1640 BDW_FBC_COMPRESSION_MASK :
1641 IVB_FBC_COMPRESSION_MASK;
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001642 seq_printf(m, "Compressing: %s\n",
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001643 yesno(I915_READ(FBC_STATUS2) & mask));
1644 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001645
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001646 mutex_unlock(&dev_priv->fbc.lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001647 intel_runtime_pm_put(dev_priv);
1648
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001649 return 0;
1650}
1651
Rodrigo Vivida46f932014-08-01 02:04:45 -07001652static int i915_fbc_fc_get(void *data, u64 *val)
1653{
David Weinehall36cdd012016-08-22 13:59:31 +03001654 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001655
David Weinehall36cdd012016-08-22 13:59:31 +03001656 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001657 return -ENODEV;
1658
Rodrigo Vivida46f932014-08-01 02:04:45 -07001659 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001660
1661 return 0;
1662}
1663
1664static int i915_fbc_fc_set(void *data, u64 val)
1665{
David Weinehall36cdd012016-08-22 13:59:31 +03001666 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001667 u32 reg;
1668
David Weinehall36cdd012016-08-22 13:59:31 +03001669 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001670 return -ENODEV;
1671
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001672 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001673
1674 reg = I915_READ(ILK_DPFC_CONTROL);
1675 dev_priv->fbc.false_color = val;
1676
1677 I915_WRITE(ILK_DPFC_CONTROL, val ?
1678 (reg | FBC_CTL_FALSE_COLOR) :
1679 (reg & ~FBC_CTL_FALSE_COLOR));
1680
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001681 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001682 return 0;
1683}
1684
1685DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1686 i915_fbc_fc_get, i915_fbc_fc_set,
1687 "%llu\n");
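
/*
 * Illustrative sketch: DEFINE_SIMPLE_ATTRIBUTE() above generates the
 * file_operations pairing i915_fbc_fc_get()/i915_fbc_fc_set(); hooking
 * it up is then a single debugfs_create_file() call. The file name and
 * parent dentry below are examples only:
 *
 *	debugfs_create_file("i915_fbc_false_color", S_IRUSR | S_IWUSR,
 *			    minor->debugfs_root, to_i915(minor->dev),
 *			    &i915_fbc_fc_fops);
 */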
1688
Paulo Zanoni92d44622013-05-31 16:33:24 -03001689static int i915_ips_status(struct seq_file *m, void *unused)
1690{
David Weinehall36cdd012016-08-22 13:59:31 +03001691 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni92d44622013-05-31 16:33:24 -03001692
David Weinehall36cdd012016-08-22 13:59:31 +03001693 if (!HAS_IPS(dev_priv)) {
Paulo Zanoni92d44622013-05-31 16:33:24 -03001694 seq_puts(m, "not supported\n");
1695 return 0;
1696 }
1697
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001698 intel_runtime_pm_get(dev_priv);
1699
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001700 seq_printf(m, "Enabled by kernel parameter: %s\n",
1701 yesno(i915.enable_ips));
1702
David Weinehall36cdd012016-08-22 13:59:31 +03001703 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001704 seq_puts(m, "Currently: unknown\n");
1705 } else {
1706 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1707 seq_puts(m, "Currently: enabled\n");
1708 else
1709 seq_puts(m, "Currently: disabled\n");
1710 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001711
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001712 intel_runtime_pm_put(dev_priv);
1713
Paulo Zanoni92d44622013-05-31 16:33:24 -03001714 return 0;
1715}
1716
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001717static int i915_sr_status(struct seq_file *m, void *unused)
1718{
David Weinehall36cdd012016-08-22 13:59:31 +03001719 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001720 bool sr_enabled = false;
1721
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001722 intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01001723 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001724
David Weinehall36cdd012016-08-22 13:59:31 +03001725 if (HAS_PCH_SPLIT(dev_priv))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001726 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Jani Nikulac0f86832016-12-07 12:13:04 +02001727 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
David Weinehall36cdd012016-08-22 13:59:31 +03001728 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001729 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001730 else if (IS_I915GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001731 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001732 else if (IS_PINEVIEW(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001733 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001734 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ander Conselvan de Oliveira77b64552015-06-02 14:17:47 +03001735 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001736
Chris Wilson9c870d02016-10-24 13:42:15 +01001737 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001738 intel_runtime_pm_put(dev_priv);
1739
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +00001740 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001741
1742 return 0;
1743}
1744
Jesse Barnes7648fa92010-05-20 14:28:11 -07001745static int i915_emon_status(struct seq_file *m, void *unused)
1746{
David Weinehall36cdd012016-08-22 13:59:31 +03001747 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1748 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001749 unsigned long temp, chipset, gfx;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001750 int ret;
1751
David Weinehall36cdd012016-08-22 13:59:31 +03001752 if (!IS_GEN5(dev_priv))
Chris Wilson582be6b2012-04-30 19:35:02 +01001753 return -ENODEV;
1754
Chris Wilsonde227ef2010-07-03 07:58:38 +01001755 ret = mutex_lock_interruptible(&dev->struct_mutex);
1756 if (ret)
1757 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001758
1759 temp = i915_mch_val(dev_priv);
1760 chipset = i915_chipset_val(dev_priv);
1761 gfx = i915_gfx_val(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +01001762 mutex_unlock(&dev->struct_mutex);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001763
1764 seq_printf(m, "GMCH temp: %ld\n", temp);
1765 seq_printf(m, "Chipset power: %ld\n", chipset);
1766 seq_printf(m, "GFX power: %ld\n", gfx);
1767 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1768
1769 return 0;
1770}
1771
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001772static int i915_ring_freq_table(struct seq_file *m, void *unused)
1773{
David Weinehall36cdd012016-08-22 13:59:31 +03001774 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001775 int ret = 0;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001776 int gpu_freq, ia_freq;
Akash Goelf936ec32015-06-29 14:50:22 +05301777 unsigned int max_gpu_freq, min_gpu_freq;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001778
Carlos Santa26310342016-08-17 12:30:41 -07001779 if (!HAS_LLC(dev_priv)) {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001780 seq_puts(m, "unsupported on this chipset\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001781 return 0;
1782 }
1783
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001784 intel_runtime_pm_get(dev_priv);
1785
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001786 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001787 if (ret)
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001788 goto out;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001789
David Weinehall36cdd012016-08-22 13:59:31 +03001790 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
Akash Goelf936ec32015-06-29 14:50:22 +05301791 /* Convert GT frequency to 50 MHz units */
1792 min_gpu_freq =
1793 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1794 max_gpu_freq =
1795 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1796 } else {
1797 min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1798 max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1799 }
1800
Damien Lespiau267f0c92013-06-24 22:59:48 +01001801 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001802
Akash Goelf936ec32015-06-29 14:50:22 +05301803 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001804 ia_freq = gpu_freq;
1805 sandybridge_pcode_read(dev_priv,
1806 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1807 &ia_freq);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001808 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
Akash Goelf936ec32015-06-29 14:50:22 +05301809 intel_gpu_freq(dev_priv, (gpu_freq *
David Weinehall36cdd012016-08-22 13:59:31 +03001810 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001811 GEN9_FREQ_SCALER : 1))),
Chris Wilson3ebecd02013-04-12 19:10:13 +01001812 ((ia_freq >> 0) & 0xff) * 100,
1813 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001814 }
1815
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001816 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001817
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001818out:
1819 intel_runtime_pm_put(dev_priv);
1820 return ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001821}
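
/*
 * Illustrative sketch: the pcode reply printed above packs two 8-bit
 * ratios in 100 MHz units: effective CPU frequency in bits 0-7 and
 * effective ring frequency in bits 8-15. A hypothetical decode helper:
 */
static inline void decode_min_freq_reply(u32 ia_freq, int *cpu_mhz,
					 int *ring_mhz)
{
	*cpu_mhz = ((ia_freq >> 0) & 0xff) * 100;	/* bits 0-7 */
	*ring_mhz = ((ia_freq >> 8) & 0xff) * 100;	/* bits 8-15 */
}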
1822
Chris Wilson44834a62010-08-19 16:09:23 +01001823static int i915_opregion(struct seq_file *m, void *unused)
1824{
David Weinehall36cdd012016-08-22 13:59:31 +03001825 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1826 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001827 struct intel_opregion *opregion = &dev_priv->opregion;
1828 int ret;
1829
1830 ret = mutex_lock_interruptible(&dev->struct_mutex);
1831 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001832 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001833
Jani Nikula2455a8e2015-12-14 12:50:53 +02001834 if (opregion->header)
1835 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001836
1837 mutex_unlock(&dev->struct_mutex);
1838
Daniel Vetter0d38f002012-04-21 22:49:10 +02001839out:
Chris Wilson44834a62010-08-19 16:09:23 +01001840 return 0;
1841}
1842
Jani Nikulaada8f952015-12-15 13:17:12 +02001843static int i915_vbt(struct seq_file *m, void *unused)
1844{
David Weinehall36cdd012016-08-22 13:59:31 +03001845 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001846
1847 if (opregion->vbt)
1848 seq_write(m, opregion->vbt, opregion->vbt_size);
1849
1850 return 0;
1851}
1852
Chris Wilson37811fc2010-08-25 22:45:57 +01001853static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1854{
David Weinehall36cdd012016-08-22 13:59:31 +03001855 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1856 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301857 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001858 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001859 int ret;
1860
1861 ret = mutex_lock_interruptible(&dev->struct_mutex);
1862 if (ret)
1863 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001864
Daniel Vetter06957262015-08-10 13:34:08 +02001865#ifdef CONFIG_DRM_FBDEV_EMULATION
David Weinehall36cdd012016-08-22 13:59:31 +03001866 if (dev_priv->fbdev) {
1867 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001868
Chris Wilson25bcce92016-07-02 15:36:00 +01001869 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1870 fbdev_fb->base.width,
1871 fbdev_fb->base.height,
1872 fbdev_fb->base.depth,
1873 fbdev_fb->base.bits_per_pixel,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001874 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001875 drm_framebuffer_read_refcount(&fbdev_fb->base));
1876 describe_obj(m, fbdev_fb->obj);
1877 seq_putc(m, '\n');
1878 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001879#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001880
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001881 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001882 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301883 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1884 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001885 continue;
1886
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001887 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001888 fb->base.width,
1889 fb->base.height,
1890 fb->base.depth,
Daniel Vetter623f9782012-12-11 16:21:38 +01001891 fb->base.bits_per_pixel,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001892 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001893 drm_framebuffer_read_refcount(&fb->base));
Chris Wilson05394f32010-11-08 19:18:58 +00001894 describe_obj(m, fb->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +01001895 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001896 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001897 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001898 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001899
1900 return 0;
1901}
1902
Chris Wilson7e37f882016-08-02 22:50:21 +01001903static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001904{
1905 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
Chris Wilson7e37f882016-08-02 22:50:21 +01001906 ring->space, ring->head, ring->tail,
1907 ring->last_retired_head);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001908}
1909
Ben Widawskye76d3632011-03-19 18:14:29 -07001910static int i915_context_status(struct seq_file *m, void *unused)
1911{
David Weinehall36cdd012016-08-22 13:59:31 +03001912 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1913 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001914 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01001915 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05301916 enum intel_engine_id id;
Dave Gordonc3232b12016-03-23 18:19:53 +00001917 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001918
Daniel Vetterf3d28872014-05-29 23:23:08 +02001919 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001920 if (ret)
1921 return ret;
1922
Ben Widawskya33afea2013-09-17 21:12:45 -07001923 list_for_each_entry(ctx, &dev_priv->context_list, link) {
Chris Wilson5d1808e2016-04-28 09:56:51 +01001924 seq_printf(m, "HW context %u ", ctx->hw_id);
Chris Wilsonc84455b2016-08-15 10:49:08 +01001925 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001926 struct task_struct *task;
1927
Chris Wilsonc84455b2016-08-15 10:49:08 +01001928 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001929 if (task) {
1930 seq_printf(m, "(%s [%d]) ",
1931 task->comm, task->pid);
1932 put_task_struct(task);
1933 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001934 } else if (IS_ERR(ctx->file_priv)) {
1935 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001936 } else {
1937 seq_puts(m, "(kernel) ");
1938 }
1939
Chris Wilsonbca44d82016-05-24 14:53:41 +01001940 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1941 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001942
Akash Goel3b3f1652016-10-13 22:44:48 +05301943 for_each_engine(engine, dev_priv, id) {
Chris Wilsonbca44d82016-05-24 14:53:41 +01001944 struct intel_context *ce = &ctx->engine[engine->id];
1945
1946 seq_printf(m, "%s: ", engine->name);
1947 seq_putc(m, ce->initialised ? 'I' : 'i');
1948 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001949 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001950 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001951 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001952 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001953 }
1954
Ben Widawskya33afea2013-09-17 21:12:45 -07001955 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001956 }
1957
Daniel Vetterf3d28872014-05-29 23:23:08 +02001958 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001959
1960 return 0;
1961}
1962
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001963static void i915_dump_lrc_obj(struct seq_file *m,
Chris Wilsone2efd132016-05-24 14:53:34 +01001964 struct i915_gem_context *ctx,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001965 struct intel_engine_cs *engine)
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001966{
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001967 struct i915_vma *vma = ctx->engine[engine->id].state;
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001968 struct page *page;
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001969 int j;
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001970
Chris Wilson7069b142016-04-28 09:56:52 +01001971 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
1972
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001973 if (!vma) {
1974 seq_puts(m, "\tFake context\n");
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001975 return;
1976 }
1977
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001978 if (vma->flags & I915_VMA_GLOBAL_BIND)
1979 seq_printf(m, "\tBound in GGTT at 0x%08x\n",
Chris Wilsonbde13eb2016-08-15 10:49:07 +01001980 i915_ggtt_offset(vma));
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001981
Chris Wilsona4f5ea62016-10-28 13:58:35 +01001982 if (i915_gem_object_pin_pages(vma->obj)) {
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001983 seq_puts(m, "\tFailed to get pages for context object\n\n");
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001984 return;
1985 }
1986
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001987 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
1988 if (page) {
1989 u32 *reg_state = kmap_atomic(page);
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001990
1991 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001992 seq_printf(m,
1993 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1994 j * 4,
Thomas Daniel064ca1d2014-12-02 13:21:18 +00001995 reg_state[j], reg_state[j + 1],
1996 reg_state[j + 2], reg_state[j + 3]);
1997 }
1998 kunmap_atomic(reg_state);
1999 }
2000
Chris Wilsona4f5ea62016-10-28 13:58:35 +01002001 i915_gem_object_unpin_pages(vma->obj);
Thomas Daniel064ca1d2014-12-02 13:21:18 +00002002 seq_putc(m, '\n');
2003}
2004
Ben Widawskyc0ab1ae2014-08-07 13:24:26 +01002005static int i915_dump_lrc(struct seq_file *m, void *unused)
2006{
David Weinehall36cdd012016-08-22 13:59:31 +03002007 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2008 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002009 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01002010 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05302011 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002012 int ret;
Ben Widawskyc0ab1ae2014-08-07 13:24:26 +01002013
2014 if (!i915.enable_execlists) {
2015 seq_printf(m, "Logical Ring Contexts are disabled\n");
2016 return 0;
2017 }
2018
2019 ret = mutex_lock_interruptible(&dev->struct_mutex);
2020 if (ret)
2021 return ret;
2022
Dave Gordone28e4042016-01-19 19:02:55 +00002023 list_for_each_entry(ctx, &dev_priv->context_list, link)
Akash Goel3b3f1652016-10-13 22:44:48 +05302024 for_each_engine(engine, dev_priv, id)
Chris Wilson24f1d3c2016-04-28 09:56:53 +01002025 i915_dump_lrc_obj(m, ctx, engine);
Ben Widawskyc0ab1ae2014-08-07 13:24:26 +01002026
2027 mutex_unlock(&dev->struct_mutex);
2028
2029 return 0;
2030}
2031
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002032static const char *swizzle_string(unsigned swizzle)
2033{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01002034 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002035 case I915_BIT_6_SWIZZLE_NONE:
2036 return "none";
2037 case I915_BIT_6_SWIZZLE_9:
2038 return "bit9";
2039 case I915_BIT_6_SWIZZLE_9_10:
2040 return "bit9/bit10";
2041 case I915_BIT_6_SWIZZLE_9_11:
2042 return "bit9/bit11";
2043 case I915_BIT_6_SWIZZLE_9_10_11:
2044 return "bit9/bit10/bit11";
2045 case I915_BIT_6_SWIZZLE_9_17:
2046 return "bit9/bit17";
2047 case I915_BIT_6_SWIZZLE_9_10_17:
2048 return "bit9/bit10/bit17";
2049 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09002050 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002051 }
2052
2053 return "bug";
2054}
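
/*
 * Illustrative sketch, assuming the conventional meaning of the modes
 * named above: "bit9/bit10" swizzling XORs physical address bits 9 and
 * 10 into bit 6. For that single mode the CPU-side address fixup is:
 */
static inline unsigned long swizzle_addr_bit6_9_10(unsigned long addr)
{
	/* bit6 ^= bit9 ^ bit10: shift bits 9 and 10 down onto bit 6 */
	return addr ^ (((addr >> 3) ^ (addr >> 4)) & (1 << 6));
}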
2055
2056static int i915_swizzle_info(struct seq_file *m, void *data)
2057{
David Weinehall36cdd012016-08-22 13:59:31 +03002058 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002059
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002060 intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02002061
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002062 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2063 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2064 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2065 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2066
David Weinehall36cdd012016-08-22 13:59:31 +03002067 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002068 seq_printf(m, "DCC = 0x%08x\n",
2069 I915_READ(DCC));
Daniel Vetter656bfa32014-11-20 09:26:30 +01002070 seq_printf(m, "DCC2 = 0x%08x\n",
2071 I915_READ(DCC2));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002072 seq_printf(m, "C0DRB3 = 0x%04x\n",
2073 I915_READ16(C0DRB3));
2074 seq_printf(m, "C1DRB3 = 0x%04x\n",
2075 I915_READ16(C1DRB3));
David Weinehall36cdd012016-08-22 13:59:31 +03002076 } else if (INTEL_GEN(dev_priv) >= 6) {
Daniel Vetter3fa7d232012-01-31 16:47:56 +01002077 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2078 I915_READ(MAD_DIMM_C0));
2079 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2080 I915_READ(MAD_DIMM_C1));
2081 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2082 I915_READ(MAD_DIMM_C2));
2083 seq_printf(m, "TILECTL = 0x%08x\n",
2084 I915_READ(TILECTL));
David Weinehall36cdd012016-08-22 13:59:31 +03002085 if (INTEL_GEN(dev_priv) >= 8)
Ben Widawsky9d3203e2013-11-02 21:07:14 -07002086 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2087 I915_READ(GAMTARBMODE));
2088 else
2089 seq_printf(m, "ARB_MODE = 0x%08x\n",
2090 I915_READ(ARB_MODE));
Daniel Vetter3fa7d232012-01-31 16:47:56 +01002091 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2092 I915_READ(DISP_ARB_CTL));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002093 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01002094
2095 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2096 seq_puts(m, "L-shaped memory detected\n");
2097
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002098 intel_runtime_pm_put(dev_priv);
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002099
2100 return 0;
2101}
2102
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002103static int per_file_ctx(int id, void *ptr, void *data)
2104{
Chris Wilsone2efd132016-05-24 14:53:34 +01002105 struct i915_gem_context *ctx = ptr;
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002106 struct seq_file *m = data;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002107 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2108
2109 if (!ppgtt) {
2110 seq_printf(m, " no ppgtt for context %d\n",
2111 ctx->user_handle);
2112 return 0;
2113 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002114
Oscar Mateof83d6512014-05-22 14:13:38 +01002115 if (i915_gem_context_is_default(ctx))
2116 seq_puts(m, " default context:\n");
2117 else
Oscar Mateo821d66d2014-07-03 16:28:00 +01002118 seq_printf(m, " context %d:\n", ctx->user_handle);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002119 ppgtt->debug_dump(ppgtt, m);
2120
2121 return 0;
2122}
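
/*
 * per_file_ctx() above follows the idr_for_each() callback contract:
 * idr_for_each(&idr, fn, data) invokes fn(id, ptr, data) for every
 * allocated id and stops the walk early if fn returns nonzero, so
 * returning 0 keeps iterating. i915_ppgtt_info() below drives it this
 * way, passing the seq_file through the data pointer.
 */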
2123
David Weinehall36cdd012016-08-22 13:59:31 +03002124static void gen8_ppgtt_info(struct seq_file *m,
2125 struct drm_i915_private *dev_priv)
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002126{
Ben Widawsky77df6772013-11-02 21:07:30 -07002127 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
Akash Goel3b3f1652016-10-13 22:44:48 +05302128 struct intel_engine_cs *engine;
2129 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002130 int i;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002131
Ben Widawsky77df6772013-11-02 21:07:30 -07002132 if (!ppgtt)
2133 return;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002134
Akash Goel3b3f1652016-10-13 22:44:48 +05302135 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002136 seq_printf(m, "%s\n", engine->name);
Ben Widawsky77df6772013-11-02 21:07:30 -07002137 for (i = 0; i < 4; i++) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002138 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
Ben Widawsky77df6772013-11-02 21:07:30 -07002139 pdp <<= 32;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002140 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
Ville Syrjäläa2a5b152014-03-31 18:17:16 +03002141 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
Ben Widawsky77df6772013-11-02 21:07:30 -07002142 }
2143 }
2144}
2145
David Weinehall36cdd012016-08-22 13:59:31 +03002146static void gen6_ppgtt_info(struct seq_file *m,
2147 struct drm_i915_private *dev_priv)
Ben Widawsky77df6772013-11-02 21:07:30 -07002148{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002149 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302150 enum intel_engine_id id;
Ben Widawsky77df6772013-11-02 21:07:30 -07002151
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002152 if (IS_GEN6(dev_priv))
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002153 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2154
Akash Goel3b3f1652016-10-13 22:44:48 +05302155 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002156 seq_printf(m, "%s\n", engine->name);
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002157 if (IS_GEN7(dev_priv))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002158 seq_printf(m, "GFX_MODE: 0x%08x\n",
2159 I915_READ(RING_MODE_GEN7(engine)));
2160 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2161 I915_READ(RING_PP_DIR_BASE(engine)));
2162 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2163 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2164 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2165 I915_READ(RING_PP_DIR_DCLV(engine)));
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002166 }
2167 if (dev_priv->mm.aliasing_ppgtt) {
2168 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2169
Damien Lespiau267f0c92013-06-24 22:59:48 +01002170 seq_puts(m, "aliasing PPGTT:\n");
Mika Kuoppala44159dd2015-06-25 18:35:07 +03002171 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002172
Ben Widawsky87d60b62013-12-06 14:11:29 -08002173 ppgtt->debug_dump(ppgtt, m);
Daniel Vetterae6c4802014-08-06 15:04:53 +02002174 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002175
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002176 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
Ben Widawsky77df6772013-11-02 21:07:30 -07002177}
2178
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}

static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

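/*
 * Summarise RPS (render P-state) activity: current/min/max frequencies,
 * per-client boost counts, and, while requests are in flight, the up/down
 * evaluation-interval counters that drive the autotuning decisions.
 */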
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));

	mutex_lock(&dev->filelist_mutex);
	spin_lock(&dev_priv->rps.client_lock);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts%s\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   file_priv->rps.boosts,
			   list_empty(&file_priv->rps.link) ? "" : ", active");
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
	spin_unlock(&dev_priv->rps.client_lock);
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    dev_priv->rps.enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(dev_priv->rps.power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   100 * rpup / rpupei,
			   dev_priv->rps.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   100 * rpdown / rpdownei,
			   dev_priv->rps.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv)/1024/1024);

	return 0;
}

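/*
 * Decode the GuC firmware load state: the fetch/load status tracked by
 * the driver, plus the bootrom/uKernel/MIA fields of the GUC_STATUS
 * register and the SOFT_SCRATCH mailbox registers.
 */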
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	u32 tmp, i;

	if (!HAS_GUC_UCODE(dev_priv))
		return 0;

	seq_printf(m, "GuC firmware status:\n");
	seq_printf(m, "\tpath: %s\n",
		   guc_fw->guc_fw_path);
	seq_printf(m, "\tfetch: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
	seq_printf(m, "\tload: %s\n",
		   intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
	seq_printf(m, "\tversion wanted: %d.%d\n",
		   guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
	seq_printf(m, "\tversion found: %d.%d\n",
		   guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
	seq_printf(m, "\theader: offset is %d; size = %d\n",
		   guc_fw->header_offset, guc_fw->header_size);
	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
		   guc_fw->ucode_offset, guc_fw->ucode_size);
	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
		   guc_fw->rsa_offset, guc_fw->rsa_size);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	return 0;
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	seq_puts(m, "\nGuC logging stats:\n");

	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);

	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);

	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);

	seq_printf(m, "\tTotal flush interrupt count: %u\n",
		   guc->log.flush_interrupt_count);

	seq_printf(m, "\tCapture miss count: %u\n",
		   guc->log.capture_miss_count);
}

static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct i915_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
		   client->priority, client->ctx_index, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
		   client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
		   client->wq_size, client->wq_offset, client->wq_tail);

	seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
	seq_printf(m, "\tLast submission result: %d\n", client->retcode);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 total;

	if (!guc->execbuf_client) {
		seq_printf(m, "GuC submission %s\n",
			   HAS_GUC_SCHED(dev_priv) ?
			   "disabled" :
			   "not supported");
		return 0;
	}

	seq_printf(m, "Doorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);

	seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
	seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
	seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
	seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
	seq_printf(m, "GuC last action error code: %d\n", guc->action_err);

	total = 0;
	seq_printf(m, "\nGuC submissions:\n");
	for_each_engine(engine, dev_priv, id) {
		u64 submissions = guc->submissions[id];
		total += submissions;
		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
			   engine->name, submissions, guc->last_seqno[id]);
	}
	seq_printf(m, "\t%s: %llu\n", "Total", total);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);

	i915_guc_log_info(m, dev_priv);

	/* Add more as required ... */

	return 0;
}

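/* Hexdump the GuC log buffer, four 32-bit words per line. */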
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_i915_gem_object *obj;
	int i = 0, pg;

	if (!dev_priv->guc.log.vma)
		return 0;

	obj = dev_priv->guc.log.vma->obj;
	for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
		u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
				   *(log + i), *(log + i + 1),
				   *(log + i + 2), *(log + i + 3));

		kunmap_atomic(log);
	}

	seq_putc(m, '\n');

	return 0;
}

static int i915_guc_log_control_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	*val = i915.guc_log_level;

	return 0;
}

static int i915_guc_log_control_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (!dev_priv->guc.log.vma)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_guc_log_control(dev_priv, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
			i915_guc_log_control_get, i915_guc_log_control_set,
			"%lld\n");

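/*
 * Report eDP PSR (panel self-refresh) state. On DDI platforms the enable
 * bit lives in EDP_PSR_CTL (or EDP_PSR2_CTL); on VLV/CHV it is tracked
 * per pipe via VLV_PSRSTAT, which is why the per-pipe loop below takes a
 * power-domain reference for each transcoder it peeks at.
 */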
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;

	if (!HAS_PSR(dev_priv)) {
		seq_puts(m, "PSR not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no performance counter of any kind; on SKL+ the
	 * perf counter is reset to 0 every time a DC state is entered.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		struct drm_crtc *crtc;

		if (!connector->base.state->best_encoder)
			continue;

		crtc = connector->base.state->crtc;
		if (!crtc->state->active)
			continue;

		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

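/*
 * Energy since boot in microjoules. Bits 12:8 of MSR_RAPL_POWER_UNIT
 * give the energy status unit as 1/2^ESU joules, so 1000000 >> ESU is
 * the size of one counter tick in uJ; MCH_SECP_NRG_STTS supplies the
 * tick count.
 */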
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u64 power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (unsigned long long)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
	u32 state;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	return state;
}

static bool cursor_position(struct drm_i915_private *dev_priv,
			    int pipe, int *x, int *y)
{
	u32 pos;

	pos = I915_READ(CURPOS(pipe));

	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
		*x = -*x;

	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
		*y = -*y;

	return cursor_active(dev_priv, pipe);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to the docs only one DRM_ROTATE_ value is allowed,
	 * but print them all so any misuse of the bits is visible.
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}

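/*
 * Plane src coordinates are 16.16 fixed point; the (x & 0xffff) * 15625 >> 10
 * below is fraction * 1000000 / 65536, i.e. the fractional part expressed
 * in decimal millionths for the %04u format.
 */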
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->pixel_format, &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev_priv, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
				   x, y, crtc->base.cursor->state->crtc_w,
				   crtc->base.cursor->state->crtc_h,
				   crtc->cursor_addr, yesno(active));
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_connector_info(m, connector);
	}
	drm_modeset_unlock_all(dev);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

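/*
 * Per-engine snapshot: seqno/hangcheck progress, the first/last/active
 * requests on the timeline, the ring registers, and either the execlists
 * port/CSB state or, on older gens, the PPGTT page-directory registers,
 * followed by the list of waiters on the breadcrumb tree.
 */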
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct drm_i915_gem_request *rq;
		struct rb_node *rb;
		u64 addr;

		seq_printf(m, "%s\n", engine->name);
		seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
			   intel_engine_get_seqno(engine),
			   intel_engine_last_submit(engine),
			   engine->hangcheck.seqno,
			   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));

		rcu_read_lock();

		seq_printf(m, "\tRequests:\n");

		rq = list_first_entry(&engine->timeline->requests,
				      struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
			print_request(m, rq, "\t\tfirst  ");

		rq = list_last_entry(&engine->timeline->requests,
				     struct drm_i915_gem_request, link);
		if (&rq->link != &engine->timeline->requests)
			print_request(m, rq, "\t\tlast   ");

		rq = i915_gem_find_active_request(engine);
		if (rq) {
			print_request(m, rq, "\t\tactive ");
			seq_printf(m,
				   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
				   rq->head, rq->postfix, rq->tail,
				   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
				   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		}

		seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
			   I915_READ(RING_START(engine->mmio_base)),
			   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
		seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
			   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
			   rq ? rq->ring->head : 0);
		seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
			   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
			   rq ? rq->ring->tail : 0);
		seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
			   I915_READ(RING_CTL(engine->mmio_base)),
			   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");

		rcu_read_unlock();

		addr = intel_engine_get_active_head(engine);
		seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));
		addr = intel_engine_get_last_batch_head(engine);
		seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
			   upper_32_bits(addr), lower_32_bits(addr));

		if (i915.enable_execlists) {
			u32 ptr, read, write;
			struct rb_node *rb;

			seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
				   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
				   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

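			/*
			 * The context-status buffer is a ring of
			 * GEN8_CSB_ENTRIES slots; unwrap the read/write
			 * pointers so the walk below visits each entry
			 * between them exactly once, modulo the ring size.
			 */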
			ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
			read = GEN8_CSB_READ_PTR(ptr);
			write = GEN8_CSB_WRITE_PTR(ptr);
			seq_printf(m, "\tExeclist CSB read %d, write %d\n",
				   read, write);
			if (read >= GEN8_CSB_ENTRIES)
				read = 0;
			if (write >= GEN8_CSB_ENTRIES)
				write = 0;
			if (read > write)
				write += GEN8_CSB_ENTRIES;
			while (read < write) {
				unsigned int idx = ++read % GEN8_CSB_ENTRIES;

				seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
					   idx,
					   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
					   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
			}

			rcu_read_lock();
			rq = READ_ONCE(engine->execlist_port[0].request);
			if (rq)
				print_request(m, rq, "\t\tELSP[0] ");
			else
				seq_printf(m, "\t\tELSP[0] idle\n");
			rq = READ_ONCE(engine->execlist_port[1].request);
			if (rq)
				print_request(m, rq, "\t\tELSP[1] ");
			else
				seq_printf(m, "\t\tELSP[1] idle\n");
			rcu_read_unlock();

			spin_lock_irq(&engine->timeline->lock);
			for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
				rq = rb_entry(rb, typeof(*rq), priotree.node);
				print_request(m, rq, "\t\tQ ");
			}
			spin_unlock_irq(&engine->timeline->lock);
		} else if (INTEL_GEN(dev_priv) > 6) {
			seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE(engine)));
			seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
				   I915_READ(RING_PP_DIR_BASE_READ(engine)));
			seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
				   I915_READ(RING_PP_DIR_DCLV(engine)));
		}

		spin_lock_irq(&b->lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = container_of(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->lock);

		seq_puts(m, "\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_semaphore_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	int num_rings = INTEL_INFO(dev_priv)->num_rings;
	enum intel_engine_id id;
	int j, ret;

	if (!i915.semaphores) {
		seq_puts(m, "Semaphores are disabled\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_BROADWELL(dev_priv)) {
		struct page *page;
		uint64_t *seqno;

		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);

		seqno = (uint64_t *)kmap_atomic(page);
		for_each_engine(engine, dev_priv, id) {
			uint64_t offset;

			seq_printf(m, "%s\n", engine->name);

			seq_puts(m, "  Last signal:");
			for (j = 0; j < num_rings; j++) {
				offset = id * I915_NUM_ENGINES + j;
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

			seq_puts(m, "  Last wait:  ");
			for (j = 0; j < num_rings; j++) {
				offset = id + (j * I915_NUM_ENGINES);
				seq_printf(m, "0x%08llx (0x%02llx) ",
					   seqno[offset], offset * 8);
			}
			seq_putc(m, '\n');

		}
		kunmap_atomic(seqno);
	} else {
		seq_puts(m, "  Last signal:");
		for_each_engine(engine, dev_priv, id)
			for (j = 0; j < num_rings; j++)
				seq_printf(m, "0x%08x\n",
					   I915_READ(engine->semaphore.mbox.signal[j]));
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->config.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

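/*
 * Re-read every workaround register and compare it, under its mask, with
 * the value the driver wrote, flagging any register that the hardware
 * (or a context restore) has since clobbered.
 */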
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	int i;
	int ret;
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	enum intel_engine_id id;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for_each_engine(engine, dev_priv, id)
		seq_printf(m, "HW whitelist count for %s: %d\n",
			   engine->name, workarounds->hw_whitelist_count[id]);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
3407
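/*
 * Print the display data buffer (DDB) allocation on gen9+: start/end
 * block and size for each universal plane and the cursor, per pipe.
 */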
Damien Lespiauc5511e42014-11-04 17:06:51 +00003408static int i915_ddb_info(struct seq_file *m, void *unused)
3409{
David Weinehall36cdd012016-08-22 13:59:31 +03003410 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3411 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003412 struct skl_ddb_allocation *ddb;
3413 struct skl_ddb_entry *entry;
3414 enum pipe pipe;
3415 int plane;
3416
David Weinehall36cdd012016-08-22 13:59:31 +03003417 if (INTEL_GEN(dev_priv) < 9)
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003418 return 0;
3419
Damien Lespiauc5511e42014-11-04 17:06:51 +00003420 drm_modeset_lock_all(dev);
3421
3422 ddb = &dev_priv->wm.skl_hw.ddb;
3423
3424 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3425
3426 for_each_pipe(dev_priv, pipe) {
3427 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3428
Matt Roper8b364b42016-10-26 15:51:28 -07003429 for_each_universal_plane(dev_priv, pipe, plane) {
Damien Lespiauc5511e42014-11-04 17:06:51 +00003430 entry = &ddb->plane[pipe][plane];
3431 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3432 entry->start, entry->end,
3433 skl_ddb_entry_size(entry));
3434 }
3435
Matt Roper4969d332015-09-24 15:53:10 -07003436 entry = &ddb->plane[pipe][PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003437 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3438 entry->end, skl_ddb_entry_size(entry));
3439 }
3440
3441 drm_modeset_unlock_all(dev);
3442
3443 return 0;
3444}
3445
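/*
 * Per-CRTC DRRS (Display Refresh Rate Switching) status: the DRRS
 * flavour advertised by the VBT and, where seamless DRRS is enabled,
 * whether the panel is currently at its high or low refresh rate.
 */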
Vandana Kannana54746e2015-03-03 20:53:10 +05303446static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003447 struct drm_device *dev,
3448 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303449{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003450 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303451 struct i915_drrs *drrs = &dev_priv->drrs;
3452 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003453 struct drm_connector *connector;
Vandana Kannana54746e2015-03-03 20:53:10 +05303454
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003455 drm_for_each_connector(connector, dev) {
3456 if (connector->state->crtc != &intel_crtc->base)
3457 continue;
3458
3459 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303460 }
3461
3462 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3463 seq_puts(m, "\tVBT: DRRS_type: Static");
3464 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3465 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3466 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3467 seq_puts(m, "\tVBT: DRRS_type: None");
3468 else
3469 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3470
3471 seq_puts(m, "\n\n");
3472
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003473 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303474 struct intel_panel *panel;
3475
3476 mutex_lock(&drrs->mutex);
3477 /* DRRS Supported */
3478 seq_puts(m, "\tDRRS Supported: Yes\n");
3479
3480 /* disable_drrs() will make drrs->dp NULL */
3481 if (!drrs->dp) {
3482 seq_puts(m, "Idleness DRRS: Disabled");
3483 mutex_unlock(&drrs->mutex);
3484 return;
3485 }
3486
3487 panel = &drrs->dp->attached_connector->panel;
3488 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3489 drrs->busy_frontbuffer_bits);
3490
3491 seq_puts(m, "\n\t\t");
3492 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3493 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3494 vrefresh = panel->fixed_mode->vrefresh;
3495 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3496 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3497 vrefresh = panel->downclock_mode->vrefresh;
3498 } else {
3499 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3500 drrs->refresh_rate_type);
3501 mutex_unlock(&drrs->mutex);
3502 return;
3503 }
3504 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3505
3506 seq_puts(m, "\n\t\t");
3507 mutex_unlock(&drrs->mutex);
3508 } else {
3509 /* DRRS not supported. Print the VBT parameter */
3510 seq_puts(m, "\tDRRS Supported: No");
3511 }
3512 seq_puts(m, "\n");
3513}
3514
3515static int i915_drrs_status(struct seq_file *m, void *unused)
3516{
David Weinehall36cdd012016-08-22 13:59:31 +03003517 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3518 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303519 struct intel_crtc *intel_crtc;
3520 int active_crtc_cnt = 0;
3521
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003522 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303523 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003524 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303525 active_crtc_cnt++;
3526 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3527
3528 drrs_status_per_crtc(m, dev, intel_crtc);
3529 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303530 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003531 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303532
3533 if (!active_crtc_cnt)
3534 seq_puts(m, "No active crtc found\n");
3535
3536 return 0;
3537}
3538
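/*
 * Walk all DisplayPort connectors and, for every source port with MST
 * support, dump the topology tracked by the MST topology manager.
 */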
Dave Airlie11bed952014-05-12 15:22:27 +10003539static int i915_dp_mst_info(struct seq_file *m, void *unused)
3540{
David Weinehall36cdd012016-08-22 13:59:31 +03003541 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3542 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003543 struct intel_encoder *intel_encoder;
3544 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003545 struct drm_connector *connector;
3546
Dave Airlie11bed952014-05-12 15:22:27 +10003547 drm_modeset_lock_all(dev);
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003548 drm_for_each_connector(connector, dev) {
3549 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003550 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003551
3552 intel_encoder = intel_attached_encoder(connector);
3553 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3554 continue;
3555
3556 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003557 if (!intel_dig_port->dp.can_mst)
3558 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003559
Jim Bride40ae80c2016-04-14 10:18:37 -07003560 seq_printf(m, "MST Source Port %c\n",
3561 port_name(intel_dig_port->port));
Dave Airlie11bed952014-05-12 15:22:27 +10003562 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3563 }
3564 drm_modeset_unlock_all(dev);
3565 return 0;
3566}
3567
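/*
 * DP compliance test hooks. A test harness arms the compliance code by
 * writing "1" to i915_dp_test_active and reads the negotiated test
 * parameters back from the companion files. A sketch of the expected
 * flow, assuming debugfs is mounted at /sys/kernel/debug and the device
 * is card 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_type
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_data
 *
 * Any written value other than 1 disarms the compliance handling.
 */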
Todd Previteeb3394fa2015-04-18 00:04:19 -07003568static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003569 const char __user *ubuf,
3570 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003571{
3572 char *input_buffer;
3573 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003574 struct drm_device *dev;
3575 struct drm_connector *connector;
3576 struct list_head *connector_list;
3577 struct intel_dp *intel_dp;
3578 int val = 0;
3579
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303580 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003581
Todd Previteeb3394fa2015-04-18 00:04:19 -07003582 connector_list = &dev->mode_config.connector_list;
3583
3584 if (len == 0)
3585 return 0;
3586
3587 input_buffer = kmalloc(len + 1, GFP_KERNEL);
3588 if (!input_buffer)
3589 return -ENOMEM;
3590
3591 if (copy_from_user(input_buffer, ubuf, len)) {
3592 status = -EFAULT;
3593 goto out;
3594 }
3595
3596 input_buffer[len] = '\0';
3597 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3598
3599 list_for_each_entry(connector, connector_list, head) {
Todd Previteeb3394fa2015-04-18 00:04:19 -07003600 if (connector->connector_type !=
3601 DRM_MODE_CONNECTOR_DisplayPort)
3602 continue;
3603
Sudip Mukherjeeb8bb08e2015-07-21 17:36:46 +05303604 if (connector->status == connector_status_connected &&
Todd Previteeb3394fa2015-04-18 00:04:19 -07003605 connector->encoder != NULL) {
3606 intel_dp = enc_to_intel_dp(connector->encoder);
3607 status = kstrtoint(input_buffer, 10, &val);
3608 if (status < 0)
3609 goto out;
3610 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3611 /* To prevent erroneous activation of the compliance
3612 * testing code, only accept an actual value of 1 here
3613 */
3614 if (val == 1)
3615 intel_dp->compliance_test_active = 1;
3616 else
3617 intel_dp->compliance_test_active = 0;
3618 }
3619 }
3620out:
3621 kfree(input_buffer);
3622 if (status < 0)
3623 return status;
3624
3625 *offp += len;
3626 return len;
3627}
3628
3629static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3630{
3631 struct drm_device *dev = m->private;
3632 struct drm_connector *connector;
3633 struct list_head *connector_list = &dev->mode_config.connector_list;
3634 struct intel_dp *intel_dp;
3635
Todd Previteeb3394fa2015-04-18 00:04:19 -07003636 list_for_each_entry(connector, connector_list, head) {
Todd Previteeb3394fa2015-04-18 00:04:19 -07003637 if (connector->connector_type !=
3638 DRM_MODE_CONNECTOR_DisplayPort)
3639 continue;
3640
3641 if (connector->status == connector_status_connected &&
3642 connector->encoder != NULL) {
3643 intel_dp = enc_to_intel_dp(connector->encoder);
3644 if (intel_dp->compliance_test_active)
3645 seq_puts(m, "1");
3646 else
3647 seq_puts(m, "0");
3648 } else
3649 seq_puts(m, "0");
3650 }
3651
3652 return 0;
3653}
3654
3655static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003656 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003657{
David Weinehall36cdd012016-08-22 13:59:31 +03003658 struct drm_i915_private *dev_priv = inode->i_private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003659
David Weinehall36cdd012016-08-22 13:59:31 +03003660 return single_open(file, i915_displayport_test_active_show,
3661 &dev_priv->drm);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003662}
3663
3664static const struct file_operations i915_displayport_test_active_fops = {
3665 .owner = THIS_MODULE,
3666 .open = i915_displayport_test_active_open,
3667 .read = seq_read,
3668 .llseek = seq_lseek,
3669 .release = single_release,
3670 .write = i915_displayport_test_active_write
3671};
3672
3673static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3674{
3675 struct drm_device *dev = m->private;
3676 struct drm_connector *connector;
3677 struct list_head *connector_list = &dev->mode_config.connector_list;
3678 struct intel_dp *intel_dp;
3679
Todd Previteeb3394fa2015-04-18 00:04:19 -07003680 list_for_each_entry(connector, connector_list, head) {
Todd Previteeb3394fa2015-04-18 00:04:19 -07003681 if (connector->connector_type !=
3682 DRM_MODE_CONNECTOR_DisplayPort)
3683 continue;
3684
3685 if (connector->status == connector_status_connected &&
3686 connector->encoder != NULL) {
3687 intel_dp = enc_to_intel_dp(connector->encoder);
3688 seq_printf(m, "%lx", intel_dp->compliance_test_data);
3689 } else
3690 seq_puts(m, "0");
3691 }
3692
3693 return 0;
3694}

3695static int i915_displayport_test_data_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003696 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003697{
David Weinehall36cdd012016-08-22 13:59:31 +03003698 struct drm_i915_private *dev_priv = inode->i_private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003699
David Weinehall36cdd012016-08-22 13:59:31 +03003700 return single_open(file, i915_displayport_test_data_show,
3701 &dev_priv->drm);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003702}
3703
3704static const struct file_operations i915_displayport_test_data_fops = {
3705 .owner = THIS_MODULE,
3706 .open = i915_displayport_test_data_open,
3707 .read = seq_read,
3708 .llseek = seq_lseek,
3709 .release = single_release
3710};
3711
3712static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3713{
3714 struct drm_device *dev = m->private;
3715 struct drm_connector *connector;
3716 struct list_head *connector_list = &dev->mode_config.connector_list;
3717 struct intel_dp *intel_dp;
3718
Todd Previteeb3394fa2015-04-18 00:04:19 -07003719 list_for_each_entry(connector, connector_list, head) {
Todd Previteeb3394fa2015-04-18 00:04:19 -07003720 if (connector->connector_type !=
3721 DRM_MODE_CONNECTOR_DisplayPort)
3722 continue;
3723
3724 if (connector->status == connector_status_connected &&
3725 connector->encoder != NULL) {
3726 intel_dp = enc_to_intel_dp(connector->encoder);
3727 seq_printf(m, "%02lx", intel_dp->compliance_test_type);
3728 } else
3729 seq_puts(m, "0");
3730 }
3731
3732 return 0;
3733}
3734
3735static int i915_displayport_test_type_open(struct inode *inode,
3736 struct file *file)
3737{
David Weinehall36cdd012016-08-22 13:59:31 +03003738 struct drm_i915_private *dev_priv = inode->i_private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003739
David Weinehall36cdd012016-08-22 13:59:31 +03003740 return single_open(file, i915_displayport_test_type_show,
3741 &dev_priv->drm);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003742}
3743
3744static const struct file_operations i915_displayport_test_type_fops = {
3745 .owner = THIS_MODULE,
3746 .open = i915_displayport_test_type_open,
3747 .read = seq_read,
3748 .llseek = seq_lseek,
3749 .release = single_release
3750};
3751
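/*
 * Print one line per watermark level. The raw table stores WM1+ values
 * in 0.5us units on ILK-style hardware but full microseconds on
 * gen9/vlv/chv, hence the *5 vs *10 scaling below to get tenths of a us.
 */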
Damien Lespiau97e94b22014-11-04 17:06:50 +00003752static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003753{
David Weinehall36cdd012016-08-22 13:59:31 +03003754 struct drm_i915_private *dev_priv = m->private;
3755 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003756 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003757 int num_levels;
3758
David Weinehall36cdd012016-08-22 13:59:31 +03003759 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003760 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003761 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003762 num_levels = 1;
3763 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003764 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003765
3766 drm_modeset_lock_all(dev);
3767
3768 for (level = 0; level < num_levels; level++) {
3769 unsigned int latency = wm[level];
3770
Damien Lespiau97e94b22014-11-04 17:06:50 +00003771 /*
3772 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003773 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003774 */
David Weinehall36cdd012016-08-22 13:59:31 +03003775 if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
3776 IS_CHERRYVIEW(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003777 latency *= 10;
3778 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003779 latency *= 5;
3780
3781 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003782 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783 }
3784
3785 drm_modeset_unlock_all(dev);
3786}
3787
3788static int pri_wm_latency_show(struct seq_file *m, void *data)
3789{
David Weinehall36cdd012016-08-22 13:59:31 +03003790 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003791 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003792
David Weinehall36cdd012016-08-22 13:59:31 +03003793 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003794 latencies = dev_priv->wm.skl_latency;
3795 else
David Weinehall36cdd012016-08-22 13:59:31 +03003796 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003797
3798 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003799
3800 return 0;
3801}
3802
3803static int spr_wm_latency_show(struct seq_file *m, void *data)
3804{
David Weinehall36cdd012016-08-22 13:59:31 +03003805 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003806 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003807
David Weinehall36cdd012016-08-22 13:59:31 +03003808 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003809 latencies = dev_priv->wm.skl_latency;
3810 else
David Weinehall36cdd012016-08-22 13:59:31 +03003811 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003812
3813 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003814
3815 return 0;
3816}
3817
3818static int cur_wm_latency_show(struct seq_file *m, void *data)
3819{
David Weinehall36cdd012016-08-22 13:59:31 +03003820 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003821 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003822
David Weinehall36cdd012016-08-22 13:59:31 +03003823 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003824 latencies = dev_priv->wm.skl_latency;
3825 else
David Weinehall36cdd012016-08-22 13:59:31 +03003826 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003827
3828 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003829
3830 return 0;
3831}
3832
3833static int pri_wm_latency_open(struct inode *inode, struct file *file)
3834{
David Weinehall36cdd012016-08-22 13:59:31 +03003835 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003836
David Weinehall36cdd012016-08-22 13:59:31 +03003837 if (INTEL_GEN(dev_priv) < 5)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003838 return -ENODEV;
3839
David Weinehall36cdd012016-08-22 13:59:31 +03003840 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003841}
3842
3843static int spr_wm_latency_open(struct inode *inode, struct file *file)
3844{
David Weinehall36cdd012016-08-22 13:59:31 +03003845 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003846
David Weinehall36cdd012016-08-22 13:59:31 +03003847 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003848 return -ENODEV;
3849
David Weinehall36cdd012016-08-22 13:59:31 +03003850 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003851}
3852
3853static int cur_wm_latency_open(struct inode *inode, struct file *file)
3854{
David Weinehall36cdd012016-08-22 13:59:31 +03003855 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003856
David Weinehall36cdd012016-08-22 13:59:31 +03003857 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003858 return -ENODEV;
3859
David Weinehall36cdd012016-08-22 13:59:31 +03003860 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003861}
3862
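/*
 * Overwrite a watermark latency table from userspace: exactly num_levels
 * space-separated values must be supplied or the write fails with
 * -EINVAL. For example, on a platform exposing five levels (path
 * assumed):
 *
 *   echo "2 4 7 10 14" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */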
3863static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
Damien Lespiau97e94b22014-11-04 17:06:50 +00003864 size_t len, loff_t *offp, uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003865{
3866 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003867 struct drm_i915_private *dev_priv = m->private;
3868 struct drm_device *dev = &dev_priv->drm;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003869 uint16_t new[8] = { 0 };
Ville Syrjäläde38b952015-06-24 22:00:09 +03003870 int num_levels;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003871 int level;
3872 int ret;
3873 char tmp[32];
3874
David Weinehall36cdd012016-08-22 13:59:31 +03003875 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003876 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003877 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003878 num_levels = 1;
3879 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003880 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003881
Ville Syrjälä369a1342014-01-22 14:36:08 +02003882 if (len >= sizeof(tmp))
3883 return -EINVAL;
3884
3885 if (copy_from_user(tmp, ubuf, len))
3886 return -EFAULT;
3887
3888 tmp[len] = '\0';
3889
Damien Lespiau97e94b22014-11-04 17:06:50 +00003890 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3891 &new[0], &new[1], &new[2], &new[3],
3892 &new[4], &new[5], &new[6], &new[7]);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003893 if (ret != num_levels)
3894 return -EINVAL;
3895
3896 drm_modeset_lock_all(dev);
3897
3898 for (level = 0; level < num_levels; level++)
3899 wm[level] = new[level];
3900
3901 drm_modeset_unlock_all(dev);
3902
3903 return len;
3904}
3905
3907static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3908 size_t len, loff_t *offp)
3909{
3910 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003911 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003912 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003913
David Weinehall36cdd012016-08-22 13:59:31 +03003914 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003915 latencies = dev_priv->wm.skl_latency;
3916 else
David Weinehall36cdd012016-08-22 13:59:31 +03003917 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003918
3919 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003920}
3921
3922static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3923 size_t len, loff_t *offp)
3924{
3925 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003926 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003927 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003928
David Weinehall36cdd012016-08-22 13:59:31 +03003929 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003930 latencies = dev_priv->wm.skl_latency;
3931 else
David Weinehall36cdd012016-08-22 13:59:31 +03003932 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003933
3934 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003935}
3936
3937static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3938 size_t len, loff_t *offp)
3939{
3940 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003941 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003942 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003943
David Weinehall36cdd012016-08-22 13:59:31 +03003944 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003945 latencies = dev_priv->wm.skl_latency;
3946 else
David Weinehall36cdd012016-08-22 13:59:31 +03003947 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003948
3949 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003950}
3951
3952static const struct file_operations i915_pri_wm_latency_fops = {
3953 .owner = THIS_MODULE,
3954 .open = pri_wm_latency_open,
3955 .read = seq_read,
3956 .llseek = seq_lseek,
3957 .release = single_release,
3958 .write = pri_wm_latency_write
3959};
3960
3961static const struct file_operations i915_spr_wm_latency_fops = {
3962 .owner = THIS_MODULE,
3963 .open = spr_wm_latency_open,
3964 .read = seq_read,
3965 .llseek = seq_lseek,
3966 .release = single_release,
3967 .write = spr_wm_latency_write
3968};
3969
3970static const struct file_operations i915_cur_wm_latency_fops = {
3971 .owner = THIS_MODULE,
3972 .open = cur_wm_latency_open,
3973 .read = seq_read,
3974 .llseek = seq_lseek,
3975 .release = single_release,
3976 .write = cur_wm_latency_write
3977};
3978
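/*
 * i915_wedged: reading reports whether the GPU is terminally wedged,
 * writing injects a hang via i915_handle_error(). Meant for test
 * harnesses, e.g. (path assumed):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */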
Kees Cook647416f2013-03-10 14:10:06 -07003979static int
3980i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003981{
David Weinehall36cdd012016-08-22 13:59:31 +03003982 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003983
Chris Wilsond98c52c2016-04-13 17:35:05 +01003984 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003985
Kees Cook647416f2013-03-10 14:10:06 -07003986 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003987}
3988
Kees Cook647416f2013-03-10 14:10:06 -07003989static int
3990i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003991{
David Weinehall36cdd012016-08-22 13:59:31 +03003992 struct drm_i915_private *dev_priv = data;
Imre Deakd46c0512014-04-14 20:24:27 +03003993
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003994 /*
3995 * There is no safeguard against this debugfs entry colliding
3996 * with the hangcheck calling the same i915_handle_error() in
3997 * parallel, causing an explosion. For now we assume that the
3998 * test harness is responsible enough not to inject gpu hangs
3999 * while it is writing to 'i915_wedged'
4000 */
4001
Chris Wilsond98c52c2016-04-13 17:35:05 +01004002 if (i915_reset_in_progress(&dev_priv->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02004003 return -EAGAIN;
4004
Chris Wilsonc0336662016-05-06 15:40:21 +01004005 i915_handle_error(dev_priv, val,
Mika Kuoppala58174462014-02-25 17:11:26 +02004006 "Manually setting wedged to %llu", val);
Imre Deakd46c0512014-04-14 20:24:27 +03004007
Kees Cook647416f2013-03-10 14:10:06 -07004008 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004009}
4010
Kees Cook647416f2013-03-10 14:10:06 -07004011DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4012 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03004013 "%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004014
Kees Cook647416f2013-03-10 14:10:06 -07004015static int
Chris Wilson094f9a52013-09-25 17:34:55 +01004016i915_ring_missed_irq_get(void *data, u64 *val)
4017{
David Weinehall36cdd012016-08-22 13:59:31 +03004018 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004019
4020 *val = dev_priv->gpu_error.missed_irq_rings;
4021 return 0;
4022}
4023
4024static int
4025i915_ring_missed_irq_set(void *data, u64 val)
4026{
David Weinehall36cdd012016-08-22 13:59:31 +03004027 struct drm_i915_private *dev_priv = data;
4028 struct drm_device *dev = &dev_priv->drm;
Chris Wilson094f9a52013-09-25 17:34:55 +01004029 int ret;
4030
4031 /* Lock against concurrent debugfs callers */
4032 ret = mutex_lock_interruptible(&dev->struct_mutex);
4033 if (ret)
4034 return ret;
4035 dev_priv->gpu_error.missed_irq_rings = val;
4036 mutex_unlock(&dev->struct_mutex);
4037
4038 return 0;
4039}
4040
4041DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4042 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4043 "0x%08llx\n");
4044
4045static int
4046i915_ring_test_irq_get(void *data, u64 *val)
4047{
David Weinehall36cdd012016-08-22 13:59:31 +03004048 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004049
4050 *val = dev_priv->gpu_error.test_irq_rings;
4051
4052 return 0;
4053}
4054
4055static int
4056i915_ring_test_irq_set(void *data, u64 val)
4057{
David Weinehall36cdd012016-08-22 13:59:31 +03004058 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004059
Chris Wilson3a122c22016-06-17 14:35:05 +01004060 val &= INTEL_INFO(dev_priv)->ring_mask;
Chris Wilson094f9a52013-09-25 17:34:55 +01004061 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004062 dev_priv->gpu_error.test_irq_rings = val;
Chris Wilson094f9a52013-09-25 17:34:55 +01004063
4064 return 0;
4065}
4066
4067DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4068 i915_ring_test_irq_get, i915_ring_test_irq_set,
4069 "0x%08llx\n");
4070
Chris Wilsondd624af2013-01-15 12:39:35 +00004071#define DROP_UNBOUND 0x1
4072#define DROP_BOUND 0x2
4073#define DROP_RETIRE 0x4
4074#define DROP_ACTIVE 0x8
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004075#define DROP_FREED 0x10
4076#define DROP_ALL (DROP_UNBOUND | \
4077 DROP_BOUND | \
4078 DROP_RETIRE | \
4079 DROP_ACTIVE | \
4080 DROP_FREED)
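/*
 * i915_gem_drop_caches: write a mask of the DROP_* flags above to shed
 * the matching caches; 0x1f (DROP_ALL) idles the GPU, retires requests,
 * unbinds bound and unbound objects and flushes freed ones (path
 * assumed):
 *
 *   echo 0x1f > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */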
Kees Cook647416f2013-03-10 14:10:06 -07004081static int
4082i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004083{
Kees Cook647416f2013-03-10 14:10:06 -07004084 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004085
Kees Cook647416f2013-03-10 14:10:06 -07004086 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004087}
4088
Kees Cook647416f2013-03-10 14:10:06 -07004089static int
4090i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004091{
David Weinehall36cdd012016-08-22 13:59:31 +03004092 struct drm_i915_private *dev_priv = data;
4093 struct drm_device *dev = &dev_priv->drm;
Kees Cook647416f2013-03-10 14:10:06 -07004094 int ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004095
Ben Widawsky2f9fe5f2013-11-25 09:54:37 -08004096 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
Chris Wilsondd624af2013-01-15 12:39:35 +00004097
4098 /* No need to check and wait for gpu resets; libdrm simply
4099 * auto-restarts the ioctl on -EAGAIN. */
4100 ret = mutex_lock_interruptible(&dev->struct_mutex);
4101 if (ret)
4102 return ret;
4103
4104 if (val & DROP_ACTIVE) {
Chris Wilson22dd3bb2016-09-09 14:11:50 +01004105 ret = i915_gem_wait_for_idle(dev_priv,
4106 I915_WAIT_INTERRUPTIBLE |
4107 I915_WAIT_LOCKED);
Chris Wilsondd624af2013-01-15 12:39:35 +00004108 if (ret)
4109 goto unlock;
4110 }
4111
4112 if (val & (DROP_RETIRE | DROP_ACTIVE))
Chris Wilsonc0336662016-05-06 15:40:21 +01004113 i915_gem_retire_requests(dev_priv);
Chris Wilsondd624af2013-01-15 12:39:35 +00004114
Chris Wilson21ab4e72014-09-09 11:16:08 +01004115 if (val & DROP_BOUND)
4116 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01004117
Chris Wilson21ab4e72014-09-09 11:16:08 +01004118 if (val & DROP_UNBOUND)
4119 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00004120
4121unlock:
4122 mutex_unlock(&dev->struct_mutex);
4123
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004124 if (val & DROP_FREED) {
4125 synchronize_rcu();
4126 flush_work(&dev_priv->mm.free_work);
4127 }
4128
Kees Cook647416f2013-03-10 14:10:06 -07004129 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004130}
4131
Kees Cook647416f2013-03-10 14:10:06 -07004132DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4133 i915_drop_caches_get, i915_drop_caches_set,
4134 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004135
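/*
 * i915_max_freq / i915_min_freq: read or clamp the RPS soft limits.
 * Values are taken in MHz, converted with intel_freq_opcode() and
 * rejected unless they fall within the hardware min/max range.
 */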
Kees Cook647416f2013-03-10 14:10:06 -07004136static int
4137i915_max_freq_get(void *data, u64 *val)
Jesse Barnes358733e2011-07-27 11:53:01 -07004138{
David Weinehall36cdd012016-08-22 13:59:31 +03004139 struct drm_i915_private *dev_priv = data;
Daniel Vetter004777c2012-08-09 15:07:01 +02004140
David Weinehall36cdd012016-08-22 13:59:31 +03004141 if (INTEL_GEN(dev_priv) < 6)
Daniel Vetter004777c2012-08-09 15:07:01 +02004142 return -ENODEV;
4143
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02004144 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
Kees Cook647416f2013-03-10 14:10:06 -07004145 return 0;
Jesse Barnes358733e2011-07-27 11:53:01 -07004146}
4147
Kees Cook647416f2013-03-10 14:10:06 -07004148static int
4149i915_max_freq_set(void *data, u64 val)
Jesse Barnes358733e2011-07-27 11:53:01 -07004150{
David Weinehall36cdd012016-08-22 13:59:31 +03004151 struct drm_i915_private *dev_priv = data;
Akash Goelbc4d91f2015-02-26 16:09:47 +05304152 u32 hw_max, hw_min;
Kees Cook647416f2013-03-10 14:10:06 -07004153 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02004154
David Weinehall36cdd012016-08-22 13:59:31 +03004155 if (INTEL_GEN(dev_priv) < 6)
Daniel Vetter004777c2012-08-09 15:07:01 +02004156 return -ENODEV;
Jesse Barnes358733e2011-07-27 11:53:01 -07004157
Kees Cook647416f2013-03-10 14:10:06 -07004158 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
Jesse Barnes358733e2011-07-27 11:53:01 -07004159
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004160 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02004161 if (ret)
4162 return ret;
4163
Jesse Barnes358733e2011-07-27 11:53:01 -07004164 /*
4165 * Turbo will still be enabled, but won't go above the set value.
4166 */
Akash Goelbc4d91f2015-02-26 16:09:47 +05304167 val = intel_freq_opcode(dev_priv, val);
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004168
Akash Goelbc4d91f2015-02-26 16:09:47 +05304169 hw_max = dev_priv->rps.max_freq;
4170 hw_min = dev_priv->rps.min_freq;
Jesse Barnes0a073b82013-04-17 15:54:58 -07004171
Ben Widawskyb39fb292014-03-19 18:31:11 -07004172 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004173 mutex_unlock(&dev_priv->rps.hw_lock);
4174 return -EINVAL;
4175 }
4176
Ben Widawskyb39fb292014-03-19 18:31:11 -07004177 dev_priv->rps.max_freq_softlimit = val;
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004178
Chris Wilsondc979972016-05-10 14:10:04 +01004179 intel_set_rps(dev_priv, val);
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004180
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004181 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes358733e2011-07-27 11:53:01 -07004182
Kees Cook647416f2013-03-10 14:10:06 -07004183 return 0;
Jesse Barnes358733e2011-07-27 11:53:01 -07004184}
4185
Kees Cook647416f2013-03-10 14:10:06 -07004186DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4187 i915_max_freq_get, i915_max_freq_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03004188 "%llu\n");
Jesse Barnes358733e2011-07-27 11:53:01 -07004189
Kees Cook647416f2013-03-10 14:10:06 -07004190static int
4191i915_min_freq_get(void *data, u64 *val)
Jesse Barnes1523c312012-05-25 12:34:54 -07004192{
David Weinehall36cdd012016-08-22 13:59:31 +03004193 struct drm_i915_private *dev_priv = data;
Daniel Vetter004777c2012-08-09 15:07:01 +02004194
Chris Wilson62e1baa2016-07-13 09:10:36 +01004195 if (INTEL_GEN(dev_priv) < 6)
Daniel Vetter004777c2012-08-09 15:07:01 +02004196 return -ENODEV;
4197
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02004198 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
Kees Cook647416f2013-03-10 14:10:06 -07004199 return 0;
Jesse Barnes1523c312012-05-25 12:34:54 -07004200}
4201
Kees Cook647416f2013-03-10 14:10:06 -07004202static int
4203i915_min_freq_set(void *data, u64 val)
Jesse Barnes1523c312012-05-25 12:34:54 -07004204{
David Weinehall36cdd012016-08-22 13:59:31 +03004205 struct drm_i915_private *dev_priv = data;
Akash Goelbc4d91f2015-02-26 16:09:47 +05304206 u32 hw_max, hw_min;
Kees Cook647416f2013-03-10 14:10:06 -07004207 int ret;
Daniel Vetter004777c2012-08-09 15:07:01 +02004208
Chris Wilson62e1baa2016-07-13 09:10:36 +01004209 if (INTEL_GEN(dev_priv) < 6)
Daniel Vetter004777c2012-08-09 15:07:01 +02004210 return -ENODEV;
Jesse Barnes1523c312012-05-25 12:34:54 -07004211
Kees Cook647416f2013-03-10 14:10:06 -07004212 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
Jesse Barnes1523c312012-05-25 12:34:54 -07004213
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004214 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
Daniel Vetter004777c2012-08-09 15:07:01 +02004215 if (ret)
4216 return ret;
4217
Jesse Barnes1523c312012-05-25 12:34:54 -07004218 /*
4219 * Turbo will still be enabled, but won't go below the set value.
4220 */
Akash Goelbc4d91f2015-02-26 16:09:47 +05304221 val = intel_freq_opcode(dev_priv, val);
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004222
Akash Goelbc4d91f2015-02-26 16:09:47 +05304223 hw_max = dev_priv->rps.max_freq;
4224 hw_min = dev_priv->rps.min_freq;
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004225
David Weinehall36cdd012016-08-22 13:59:31 +03004226 if (val < hw_min ||
4227 val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004228 mutex_unlock(&dev_priv->rps.hw_lock);
4229 return -EINVAL;
4230 }
4231
Ben Widawskyb39fb292014-03-19 18:31:11 -07004232 dev_priv->rps.min_freq_softlimit = val;
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004233
Chris Wilsondc979972016-05-10 14:10:04 +01004234 intel_set_rps(dev_priv, val);
Jeff McGeedd0a1aa2014-02-04 11:32:31 -06004235
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004236 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes1523c312012-05-25 12:34:54 -07004237
Kees Cook647416f2013-03-10 14:10:06 -07004238 return 0;
Jesse Barnes1523c312012-05-25 12:34:54 -07004239}
4240
Kees Cook647416f2013-03-10 14:10:06 -07004241DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4242 i915_min_freq_get, i915_min_freq_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03004243 "%llu\n");
Jesse Barnes1523c312012-05-25 12:34:54 -07004244
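/*
 * i915_cache_sharing: expose the MBC snoop policy via
 * GEN6_MBCUNIT_SNPCR on gen6/gen7; only values 0-3 are accepted.
 */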
Kees Cook647416f2013-03-10 14:10:06 -07004245static int
4246i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004247{
David Weinehall36cdd012016-08-22 13:59:31 +03004248 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004249 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004250
David Weinehall36cdd012016-08-22 13:59:31 +03004251 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004252 return -ENODEV;
4253
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004254 intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02004255
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004256 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004257
4258 intel_runtime_pm_put(dev_priv);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004259
Kees Cook647416f2013-03-10 14:10:06 -07004260 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004261
Kees Cook647416f2013-03-10 14:10:06 -07004262 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004263}
4264
Kees Cook647416f2013-03-10 14:10:06 -07004265static int
4266i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004267{
David Weinehall36cdd012016-08-22 13:59:31 +03004268 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004269 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004270
David Weinehall36cdd012016-08-22 13:59:31 +03004271 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004272 return -ENODEV;
4273
Kees Cook647416f2013-03-10 14:10:06 -07004274 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004275 return -EINVAL;
4276
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004277 intel_runtime_pm_get(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004278 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004279
4280 /* Update the cache sharing policy here as well */
4281 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4282 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4283 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4284 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4285
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004286 intel_runtime_pm_put(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004287 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004288}
4289
Kees Cook647416f2013-03-10 14:10:06 -07004290DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4291 i915_cache_sharing_get, i915_cache_sharing_set,
4292 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004293
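/*
 * Live SSEU probing: read the power-gating ack registers to count which
 * slices, subslices and EUs are currently powered, accumulating the
 * totals into the given sseu_dev_info.
 */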
David Weinehall36cdd012016-08-22 13:59:31 +03004294static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004295 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004296{
Ville Syrjälä0a0b4572015-08-21 20:45:27 +03004297 int ss_max = 2;
Jeff McGee5d395252015-04-03 18:13:17 -07004298 int ss;
4299 u32 sig1[ss_max], sig2[ss_max];
4300
4301 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4302 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4303 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4304 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4305
4306 for (ss = 0; ss < ss_max; ss++) {
4307 unsigned int eu_cnt;
4308
4309 if (sig1[ss] & CHV_SS_PG_ENABLE)
4310 /* skip disabled subslice */
4311 continue;
4312
Imre Deakf08a0c92016-08-31 19:13:04 +03004313 sseu->slice_mask = BIT(0);
Imre Deak57ec1712016-08-31 19:13:05 +03004314 sseu->subslice_mask |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004315 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4316 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4317 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4318 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004319 sseu->eu_total += eu_cnt;
4320 sseu->eu_per_subslice = max_t(unsigned int,
4321 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004322 }
Jeff McGee5d395252015-04-03 18:13:17 -07004323}
4324
David Weinehall36cdd012016-08-22 13:59:31 +03004325static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004326 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004327{
Jeff McGee1c046bc2015-04-03 18:13:18 -07004328 int s_max = 3, ss_max = 4;
Jeff McGee5d395252015-04-03 18:13:17 -07004329 int s, ss;
4330 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4331
Jeff McGee1c046bc2015-04-03 18:13:18 -07004332 /* BXT has a single slice and at most 3 subslices. */
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004333 if (IS_GEN9_LP(dev_priv)) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004334 s_max = 1;
4335 ss_max = 3;
4336 }
4337
4338 for (s = 0; s < s_max; s++) {
4339 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4340 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4341 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4342 }
4343
Jeff McGee5d395252015-04-03 18:13:17 -07004344 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4345 GEN9_PGCTL_SSA_EU19_ACK |
4346 GEN9_PGCTL_SSA_EU210_ACK |
4347 GEN9_PGCTL_SSA_EU311_ACK;
4348 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4349 GEN9_PGCTL_SSB_EU19_ACK |
4350 GEN9_PGCTL_SSB_EU210_ACK |
4351 GEN9_PGCTL_SSB_EU311_ACK;
4352
4353 for (s = 0; s < s_max; s++) {
4354 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4355 /* skip disabled slice */
4356 continue;
4357
Imre Deakf08a0c92016-08-31 19:13:04 +03004358 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004359
David Weinehall36cdd012016-08-22 13:59:31 +03004360 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
Imre Deak57ec1712016-08-31 19:13:05 +03004361 sseu->subslice_mask =
4362 INTEL_INFO(dev_priv)->sseu.subslice_mask;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004363
Jeff McGee5d395252015-04-03 18:13:17 -07004364 for (ss = 0; ss < ss_max; ss++) {
4365 unsigned int eu_cnt;
4366
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004367 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004368 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4369 /* skip disabled subslice */
4370 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004371
Imre Deak57ec1712016-08-31 19:13:05 +03004372 sseu->subslice_mask |= BIT(ss);
4373 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004374
Jeff McGee5d395252015-04-03 18:13:17 -07004375 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4376 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004377 sseu->eu_total += eu_cnt;
4378 sseu->eu_per_subslice = max_t(unsigned int,
4379 sseu->eu_per_subslice,
4380 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004381 }
4382 }
4383}
4384
David Weinehall36cdd012016-08-22 13:59:31 +03004385static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004386 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004387{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004388 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004389 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004390
Imre Deakf08a0c92016-08-31 19:13:04 +03004391 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004392
Imre Deakf08a0c92016-08-31 19:13:04 +03004393 if (sseu->slice_mask) {
Imre Deak57ec1712016-08-31 19:13:05 +03004394 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
Imre Deak43b67992016-08-31 19:13:02 +03004395 sseu->eu_per_subslice =
4396 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
Imre Deak57ec1712016-08-31 19:13:05 +03004397 sseu->eu_total = sseu->eu_per_subslice *
4398 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004399
4400 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004401 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004402 u8 subslice_7eu =
4403 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004404
Imre Deak915490d2016-08-31 19:13:01 +03004405 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004406 }
4407 }
4408}
4409
Imre Deak615d8902016-08-31 19:13:03 +03004410static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4411 const struct sseu_dev_info *sseu)
4412{
4413 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4414 const char *type = is_available_info ? "Available" : "Enabled";
4415
Imre Deakc67ba532016-08-31 19:13:06 +03004416 seq_printf(m, " %s Slice Mask: %04x\n", type,
4417 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004418 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004419 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004420 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004421 sseu_subslice_total(sseu));
Imre Deakc67ba532016-08-31 19:13:06 +03004422 seq_printf(m, " %s Subslice Mask: %04x\n", type,
4423 sseu->subslice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004424 seq_printf(m, " %s Subslice Per Slice: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004425 hweight8(sseu->subslice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004426 seq_printf(m, " %s EU Total: %u\n", type,
4427 sseu->eu_total);
4428 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4429 sseu->eu_per_subslice);
4430
4431 if (!is_available_info)
4432 return;
4433
4434 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4435 if (HAS_POOLED_EU(dev_priv))
4436 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4437
4438 seq_printf(m, " Has Slice Power Gating: %s\n",
4439 yesno(sseu->has_slice_pg));
4440 seq_printf(m, " Has Subslice Power Gating: %s\n",
4441 yesno(sseu->has_subslice_pg));
4442 seq_printf(m, " Has EU Power Gating: %s\n",
4443 yesno(sseu->has_eu_pg));
4444}
4445
Jeff McGee38732182015-02-13 10:27:54 -06004446static int i915_sseu_status(struct seq_file *m, void *unused)
4447{
David Weinehall36cdd012016-08-22 13:59:31 +03004448 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004449 struct sseu_dev_info sseu;
Jeff McGee38732182015-02-13 10:27:54 -06004450
David Weinehall36cdd012016-08-22 13:59:31 +03004451 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004452 return -ENODEV;
4453
4454 seq_puts(m, "SSEU Device Info\n");
Imre Deak615d8902016-08-31 19:13:03 +03004455 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004456
Jeff McGee7f992ab2015-02-13 10:27:55 -06004457 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004458 memset(&sseu, 0, sizeof(sseu));
David Weinehall238010e2016-08-01 17:33:27 +03004459
4460 intel_runtime_pm_get(dev_priv);
4461
David Weinehall36cdd012016-08-22 13:59:31 +03004462 if (IS_CHERRYVIEW(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004463 cherryview_sseu_device_status(dev_priv, &sseu);
David Weinehall36cdd012016-08-22 13:59:31 +03004464 } else if (IS_BROADWELL(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004465 broadwell_sseu_device_status(dev_priv, &sseu);
David Weinehall36cdd012016-08-22 13:59:31 +03004466 } else if (INTEL_GEN(dev_priv) >= 9) {
Imre Deak915490d2016-08-31 19:13:01 +03004467 gen9_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004468 }
David Weinehall238010e2016-08-01 17:33:27 +03004469
4470 intel_runtime_pm_put(dev_priv);
4471
Imre Deak615d8902016-08-31 19:13:03 +03004472 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004473
Jeff McGee38732182015-02-13 10:27:54 -06004474 return 0;
4475}
4476
Ben Widawsky6d794d42011-04-25 11:25:56 -07004477static int i915_forcewake_open(struct inode *inode, struct file *file)
4478{
David Weinehall36cdd012016-08-22 13:59:31 +03004479 struct drm_i915_private *dev_priv = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004480
David Weinehall36cdd012016-08-22 13:59:31 +03004481 if (INTEL_GEN(dev_priv) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004482 return 0;
4483
Chris Wilson6daccb02015-01-16 11:34:35 +02004484 intel_runtime_pm_get(dev_priv);
Mika Kuoppala59bad942015-01-16 11:34:40 +02004485 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004486
4487 return 0;
4488}
4489
Ben Widawskyc43b5632012-04-16 14:07:40 -07004490static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004491{
David Weinehall36cdd012016-08-22 13:59:31 +03004492 struct drm_i915_private *dev_priv = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004493
David Weinehall36cdd012016-08-22 13:59:31 +03004494 if (INTEL_GEN(dev_priv) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004495 return 0;
4496
Mika Kuoppala59bad942015-01-16 11:34:40 +02004497 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Chris Wilson6daccb02015-01-16 11:34:35 +02004498 intel_runtime_pm_put(dev_priv);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004499
4500 return 0;
4501}
4502
4503static const struct file_operations i915_forcewake_fops = {
4504 .owner = THIS_MODULE,
4505 .open = i915_forcewake_open,
4506 .release = i915_forcewake_release,
4507};
4508
4509static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4510{
Ben Widawsky6d794d42011-04-25 11:25:56 -07004511 struct dentry *ent;
4512
4513 ent = debugfs_create_file("i915_forcewake_user",
Ben Widawsky8eb57292011-05-11 15:10:58 -07004514 S_IRUSR,
David Weinehall36cdd012016-08-22 13:59:31 +03004515 root, to_i915(minor->dev),
Ben Widawsky6d794d42011-04-25 11:25:56 -07004516 &i915_forcewake_fops);
Wei Yongjunf3c5fe92013-12-16 14:13:25 +08004517 if (!ent)
4518 return -ENOMEM;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004519
Ben Widawsky8eb57292011-05-11 15:10:58 -07004520 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004521}
4522
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004523static int i915_debugfs_create(struct dentry *root,
4524 struct drm_minor *minor,
4525 const char *name,
4526 const struct file_operations *fops)
Jesse Barnes358733e2011-07-27 11:53:01 -07004527{
Jesse Barnes358733e2011-07-27 11:53:01 -07004528 struct dentry *ent;
4529
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004530 ent = debugfs_create_file(name,
Jesse Barnes358733e2011-07-27 11:53:01 -07004531 S_IRUGO | S_IWUSR,
David Weinehall36cdd012016-08-22 13:59:31 +03004532 root, to_i915(minor->dev),
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004533 fops);
Wei Yongjunf3c5fe92013-12-16 14:13:25 +08004534 if (!ent)
4535 return -ENOMEM;
Jesse Barnes358733e2011-07-27 11:53:01 -07004536
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004537 return drm_add_fake_info_node(minor, ent, fops);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004538}
4539
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004540static const struct drm_info_list i915_debugfs_list[] = {
Chris Wilson311bd682011-01-13 19:06:50 +00004541 {"i915_capabilities", i915_capabilities, 0},
Chris Wilson73aa8082010-09-30 11:46:12 +01004542 {"i915_gem_objects", i915_gem_object_info, 0},
Chris Wilson08c18322011-01-10 00:00:24 +00004543 {"i915_gem_gtt", i915_gem_gtt_info, 0},
Chris Wilson6da84822016-08-15 10:48:44 +01004544 {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
Chris Wilson6d2b88852013-08-07 18:30:54 +01004545 {"i915_gem_stolen", i915_gem_stolen_list_info },
Simon Farnsworth4e5359c2010-09-01 17:47:52 +01004546 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004547 {"i915_gem_request", i915_gem_request_info, 0},
4548 {"i915_gem_seqno", i915_gem_seqno_info, 0},
Chris Wilsona6172a82009-02-11 14:26:38 +00004549 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
#endif
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_control", &i915_guc_log_control_fops},
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = intel_pipe_crc_create(minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
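
/*
 * The nodes created above live under the DRM minor's debugfs directory,
 * <debugfs>/dri/<minor>. With debugfs mounted in the usual location, a
 * file such as i915_wedged is therefore reachable at, for example
 * (illustrative path, the minor number varies):
 *
 *	/sys/kernel/debug/dri/0/i915_wedged
 */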

void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

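	/*
	 * The forcewake file was registered through drm_add_fake_info_node(),
	 * which stores the fops pointer as the node's info_ent key; passing
	 * the same pointer here (cast to a drm_info_list) is what lets
	 * drm_debugfs_remove_files() find and remove it.
	 */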
	drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
				 1, minor);

	intel_pipe_crc_cleanup(minor);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
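
/*
 * Worked example of the size rule applied by i915_dpcd_show() below
 * (register offsets per drm_dp_helper.h): .end is inclusive, so
 * { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET } spans
 * 0x100..0x10a and reads 0x10a - 0x100 + 1 = 11 bytes, while a bare
 * { .offset = DP_SET_POWER } sets neither .end nor .size and therefore
 * dumps a single byte at 0x600.
 */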

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
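
/*
 * Example session (illustrative only; the card index, connector name and
 * bytes shown will vary):
 *
 *	# cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 *	0000: 12 14 c4 81 01 01 01 81 02 02 06 00 00 00 0f
 *	...
 *
 * Each line is a block's start offset followed by the %*ph hex dump of
 * the bytes read from the sink in i915_dpcd_show().
 */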

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

static int i915_panel_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_panel_show, inode->i_private);
}

static const struct file_operations i915_panel_fops = {
	.owner = THIS_MODULE,
	.open = i915_panel_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);

	return 0;
}
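
/*
 * Illustrative sketch only: a caller would invoke this once the connector
 * has been registered (and so owns a debugfs_entry), for instance from a
 * connector's late_register hook. The example_* name below is hypothetical
 * and not defined in this file:
 *
 *	static int example_connector_late_register(struct drm_connector *connector)
 *	{
 *		return i915_debugfs_connector_add(connector);
 *	}
 */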