blob: 747dad2666aa37e7af309cf0fa83320cc4f3cad3 [file] [log] [blame]
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
David Weinehall36cdd012016-08-22 13:59:31 +030035static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36{
37 return to_i915(node->minor->dev);
38}
39
/*
 * debugfs "i915_capabilities" entry: dump the device generation, platform
 * name, PCH type, static device-info flags, runtime info, driver caps and
 * the current module parameters into the seq_file.
 *
 * Returns 0 (seq_file show callbacks report errors via the printer).
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the dumped modparams are a consistent snapshot. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050060
/* Flag char for describe_obj(): '*' while the object is active on the GPU. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
Imre Deaka7363de2016-05-12 16:18:52 +030066static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010067{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010068 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069}
70
Imre Deaka7363de2016-05-12 16:18:52 +030071static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000072{
Chris Wilson3e510a82016-08-05 10:14:23 +010073 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040074 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010075 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040078 }
Chris Wilsona6172a82009-02-11 14:26:38 +000079}
80
Imre Deaka7363de2016-05-12 16:18:52 +030081static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070082{
Chris Wilsona65adaf2017-10-09 09:43:57 +010083 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010084}
85
Imre Deaka7363de2016-05-12 16:18:52 +030086static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010088 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070089}
90
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010091static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
Chris Wilsone2189dd2017-12-07 21:14:07 +000096 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010098 size += vma->node.size;
99 }
100
101 return size;
102}
103
Matthew Auld7393b7e2017-10-06 23:18:28 +0100104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * Print a single-line (no trailing newline) description of a GEM object:
 * flag characters, size, read/write domains, cache level, madv state,
 * flink name, pin counts, every bound vma (with GGTT view details and
 * fence), stolen-memory placement, last-write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below), which keeps the vma
 * list stable while we walk it twice.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned vmas (GGTT and ppgtt alike). */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that actually has GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* offset/size are stored in pages; print bytes. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
Chris Wilsone637d2c2017-03-16 13:19:57 +0000222static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100223{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100228
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234}
235
/*
 * debugfs "i915_gem_stolen": list every GEM object backed by stolen
 * memory, sorted by its offset within the stolen region, followed by a
 * size summary.
 *
 * The object count is only a snapshot (READ_ONCE without a lock), so the
 * collection loops stop once the pre-sized array is full rather than
 * overrunning it.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* Gather stolen objects from both the bound and unbound lists. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Accumulator for per-client GEM object statistics, filled in by
 * per_file_stats() / per_file_ctx_stats(). All sizes are in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner; used to filter ppgtt vmas */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* total size / size of objects with no bindings */
	u64 global, shared;	/* bound in the GGTT / flink-named or dma-buf exported */
	u64 active, inactive;	/* bound vma space, split by GPU activity */
};
306
/*
 * idr_for_each()/list callback: fold one GEM object's sizes into the
 * struct file_stats passed via @data. @id is unused. Always returns 0 so
 * iteration continues over the whole set.
 *
 * Caller must hold struct_mutex to keep the vma list stable.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/*
			 * Only count ppgtt vmas that belong to the client we
			 * are reporting on; other clients' address spaces may
			 * also map this object. (file_priv == NULL counts
			 * kernel-owned vmas only.)
			 */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for @stats under the label @name; prints nothing
 * when no objects were counted. A macro (not a function) so @stats can be
 * passed by value from automatic storage at each call site.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800356
357static void print_batch_pool_stats(struct seq_file *m,
358 struct drm_i915_private *dev_priv)
359{
360 struct drm_i915_gem_object *obj;
361 struct file_stats stats;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000362 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530363 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000364 int j;
Brad Volkin493018d2014-12-11 12:13:08 -0800365
366 memset(&stats, 0, sizeof(stats));
367
Akash Goel3b3f1652016-10-13 22:44:48 +0530368 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000369 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100370 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000371 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100372 batch_pool_link)
373 per_file_stats(0, obj, &stats);
374 }
Chris Wilson06fbca72015-04-07 16:20:36 +0100375 }
Brad Volkin493018d2014-12-11 12:13:08 -0800376
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100377 print_file_stats(m, "[k]batch pool", stats);
Brad Volkin493018d2014-12-11 12:13:08 -0800378}
379
/*
 * idr_for_each() callback: fold the backing objects of one GEM context
 * (per-engine logical state and ringbuffer) into a struct file_stats via
 * @data. @id is unused. Always returns 0 so iteration continues.
 */
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		/* Either may be NULL if the engine state was never allocated. */
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
394
/*
 * Summarize the memory consumed by all GEM contexts — the kernel context
 * plus every context of every open client — as one "[k]contexts" line.
 *
 * Takes struct_mutex itself; NOTE(review): the filelist walk appears to
 * rely on the caller holding filelist_mutex — confirm against callers
 * (i915_gem_object_info does hold it).
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
416
/*
 * debugfs "i915_gem_objects": global GEM memory accounting. Walks the
 * unbound and bound object lists (under mm.obj_lock) tallying totals,
 * purgeable, mapped, huge-paged and display-pinned sizes, then prints
 * batch-pool, context and per-client breakdowns.
 *
 * Locking: struct_mutex is held across the object walk and batch-pool
 * dump, dropped, then re-taken per client inside the filelist walk (with
 * filelist_mutex held) to keep hold times short.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Backing store uses pages larger than the minimum GTT page. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reuse the counters for the bound list; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the context's pid (the real submitter) over the fd owner. */
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
554
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100555static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000556{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100557 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300558 struct drm_i915_private *dev_priv = node_to_i915(node);
559 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100560 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000561 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300562 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100563 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000564 int count, ret;
565
Chris Wilsonf2123812017-10-16 12:40:37 +0100566 nobject = READ_ONCE(dev_priv->mm.object_count);
567 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
568 if (!objects)
569 return -ENOMEM;
570
Chris Wilson08c18322011-01-10 00:00:24 +0000571 ret = mutex_lock_interruptible(&dev->struct_mutex);
572 if (ret)
573 return ret;
574
Chris Wilsonf2123812017-10-16 12:40:37 +0100575 count = 0;
576 spin_lock(&dev_priv->mm.obj_lock);
577 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
578 objects[count++] = obj;
579 if (count == nobject)
580 break;
581 }
582 spin_unlock(&dev_priv->mm.obj_lock);
583
584 total_obj_size = total_gtt_size = 0;
585 for (n = 0; n < count; n++) {
586 obj = objects[n];
587
Damien Lespiau267f0c92013-06-24 22:59:48 +0100588 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000589 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100590 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000591 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100592 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000593 }
594
595 mutex_unlock(&dev->struct_mutex);
596
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300597 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000598 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100599 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000600
601 return 0;
602}
603
Brad Volkin493018d2014-12-11 12:13:08 -0800604static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
605{
David Weinehall36cdd012016-08-22 13:59:31 +0300606 struct drm_i915_private *dev_priv = node_to_i915(m->private);
607 struct drm_device *dev = &dev_priv->drm;
Brad Volkin493018d2014-12-11 12:13:08 -0800608 struct drm_i915_gem_object *obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000609 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530610 enum intel_engine_id id;
Chris Wilson8d9d5742015-04-07 16:20:38 +0100611 int total = 0;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000612 int ret, j;
Brad Volkin493018d2014-12-11 12:13:08 -0800613
614 ret = mutex_lock_interruptible(&dev->struct_mutex);
615 if (ret)
616 return ret;
617
Akash Goel3b3f1652016-10-13 22:44:48 +0530618 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000619 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100620 int count;
621
622 count = 0;
623 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000624 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100625 batch_pool_link)
626 count++;
627 seq_printf(m, "%s cache[%d]: %d objects\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000628 engine->name, j, count);
Chris Wilson8d9d5742015-04-07 16:20:38 +0100629
630 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000631 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100632 batch_pool_link) {
633 seq_puts(m, " ");
634 describe_obj(m, obj);
635 seq_putc(m, '\n');
636 }
637
638 total += count;
Chris Wilson06fbca72015-04-07 16:20:36 +0100639 }
Brad Volkin493018d2014-12-11 12:13:08 -0800640 }
641
Chris Wilson8d9d5742015-04-07 16:20:38 +0100642 seq_printf(m, "total: %d\n", total);
Brad Volkin493018d2014-12-11 12:13:08 -0800643
644 mutex_unlock(&dev->struct_mutex);
645
646 return 0;
647}
648
/*
 * Dump the gen8+ display-engine interrupt registers (shared by the
 * gen8 and gen11 paths of i915_interrupt_info): per-pipe IMR/IIR/IER,
 * then the DE port, DE misc and PCU IMR/IIR/IER triplets.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;

                /*
                 * Only read the pipe registers if its power well is up;
                 * reading a powered-down pipe would be invalid.
                 */
                power_domain = POWER_DOMAIN_PIPE(pipe);
                if (!intel_display_power_get_if_enabled(dev_priv,
                                                        power_domain)) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}
698
/*
 * debugfs i915_interrupts: dump the interrupt registers relevant to the
 * running platform. One branch per hardware family (CHV, gen11+, gen8+,
 * VLV, pre-PCH-split, Ironlake+PCH), followed by the per-engine IMRs on
 * gen6+. A runtime-pm wakeref is held for the whole dump so the MMIO
 * reads are safe.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, pipe;

        intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        /* Skip pipes whose power well is down. */
                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain);
                }

                /*
                 * Hotplug/flip registers live outside the pipe wells;
                 * bracket them with the INIT power domain instead.
                 */
                intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "Master Interrupt Control: %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));

                seq_printf(m, "Render/Copy Intr Enable: %08x\n",
                           I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
                seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
                           I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
                seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_ENABLE));
                seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
                seq_printf(m, "Crypto Intr Enable:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
                seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

                seq_printf(m, "Display Interrupt Control:\t%08x\n",
                           I915_READ(GEN11_DISPLAY_INT_CTL));

                gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        /* Skip pipes whose power well is down. */
                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        if (!intel_display_power_get_if_enabled(dev_priv,
                                                                power_domain)) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                /* Pre-Ironlake: single IER/IIR/IMR set plus pipe stats. */
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                /* Ironlake..gen7 with a PCH: north/south display + GT. */
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }

        /* Per-engine interrupt masks: dedicated registers on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "RCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
                seq_printf(m, "BCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_BCS_RSVD_INTR_MASK));
                seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
                seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
                seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
                seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_MASK));
                seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
                seq_printf(m, "Crypto Intr Mask:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
                seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

        } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s): %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }

        intel_runtime_pm_put(dev_priv);

        return 0;
}
911
Chris Wilsona6172a82009-02-11 14:26:38 +0000912static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
913{
David Weinehall36cdd012016-08-22 13:59:31 +0300914 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100916 int i, ret;
917
918 ret = mutex_lock_interruptible(&dev->struct_mutex);
919 if (ret)
920 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000921
Chris Wilsona6172a82009-02-11 14:26:38 +0000922 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
923 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100924 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000925
Chris Wilson6c085a72012-08-20 11:40:46 +0200926 seq_printf(m, "Fence %d, pin count = %d, object = ",
927 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100928 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100929 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100930 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100931 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100932 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000933 }
934
Chris Wilson05394f32010-11-08 19:18:58 +0000935 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000936 return 0;
937}
938
Chris Wilson98a2f412016-10-12 10:05:18 +0100939#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000940static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
941 size_t count, loff_t *pos)
942{
943 struct i915_gpu_state *error = file->private_data;
944 struct drm_i915_error_state_buf str;
945 ssize_t ret;
946 loff_t tmp;
947
948 if (!error)
949 return 0;
950
951 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
952 if (ret)
953 return ret;
954
955 ret = i915_error_state_to_str(&str, error);
956 if (ret)
957 goto out;
958
959 tmp = 0;
960 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
961 if (ret < 0)
962 goto out;
963
964 *pos = str.start + ret;
965out:
966 i915_error_state_buf_release(&str);
967 return ret;
968}
969
970static int gpu_state_release(struct inode *inode, struct file *file)
971{
972 i915_gpu_state_put(file->private_data);
973 return 0;
974}
975
976static int i915_gpu_info_open(struct inode *inode, struct file *file)
977{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100978 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000979 struct i915_gpu_state *gpu;
980
Chris Wilson090e5fe2017-03-28 14:14:07 +0100981 intel_runtime_pm_get(i915);
982 gpu = i915_capture_gpu_state(i915);
983 intel_runtime_pm_put(i915);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000984 if (!gpu)
985 return -ENOMEM;
986
987 file->private_data = gpu;
988 return 0;
989}
990
/* debugfs i915_gpu_info: capture-on-open, read-only GPU state snapshot. */
static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100998
Daniel Vetterd5442302012-04-27 15:17:40 +0200999static ssize_t
1000i915_error_state_write(struct file *filp,
1001 const char __user *ubuf,
1002 size_t cnt,
1003 loff_t *ppos)
1004{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001005 struct i915_gpu_state *error = filp->private_data;
1006
1007 if (!error)
1008 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001009
1010 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001011 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001012
1013 return cnt;
1014}
1015
1016static int i915_error_state_open(struct inode *inode, struct file *file)
1017{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001018 file->private_data = i915_first_error_state(inode->i_private);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001019 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001020}
1021
/*
 * debugfs i915_error_state: read streams the last captured error state
 * (shares the reader/release with i915_gpu_info); any write clears it.
 */
static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001030#endif
1031
/*
 * debugfs i915_next_seqno setter: force the global breadcrumb seqno to
 * @val. struct_mutex is taken (interruptibly) around the update, and a
 * runtime-pm wakeref is held since advancing the seqno may require
 * idling the GPU and touching hardware.
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        intel_runtime_pm_get(dev_priv);
        ret = i915_gem_set_global_seqno(dev, val);
        intel_runtime_pm_put(dev_priv);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
1051
/* i915_next_seqno is write-only (NULL getter); value parsed/shown as hex. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        NULL, i915_next_seqno_set,
                        "0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +02001055
/*
 * debugfs i915_frequency_info: dump the GPU frequency / RPS state for
 * the running platform. Three hardware families are handled: ILK (gen5)
 * MEMSWCTL/MEMSTAT, VLV/CHV via the punit, and gen6+ via the RP*
 * registers (bracketed by forcewake, since RPSTAT1 lives in the GT
 * power well). Finishes with the CD/pixel clock limits common to all.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int ret = 0;

        intel_runtime_pm_get(dev_priv);

        if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                /* punit accesses are serialised by pcu_lock. */
                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                /* Bits 15:8 of the punit status hold the actual ratio. */
                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                /* Broxton/Geminilake keep the caps in BXT_* registers. */
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                /* The requested-frequency field moved between gens. */
                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                /* PM interrupt registers moved into GT bank 2 on gen8+. */
                if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                } else {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                        pm_mask = I915_READ(GEN6_PMINTRMSK);
                }
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

                /*
                 * RPN/RP1/RP0 fields swap ends of rp_state_cap on GEN9_LP;
                 * gen9_bc and gen10+ encode caps in 50 MHz units.
                 */
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
        seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

        intel_runtime_pm_put(dev_priv);
        return ret;
}
1262
/*
 * Print a previously-sampled INSTDONE snapshot: the primary register,
 * then the slice-common register (gen4+), then the per-slice/subslice
 * sampler and row registers (gen7+).
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
                               struct seq_file *m,
                               struct intel_instdone *instdone)
{
        int slice;
        int subslice;

        seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
                   instdone->instdone);

        /* Gen2/3 only have the single INSTDONE register. */
        if (INTEL_GEN(dev_priv) <= 3)
                return;

        seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
                   instdone->slice_common);

        /* Per-slice/subslice breakdown exists from gen7 onwards. */
        if (INTEL_GEN(dev_priv) <= 6)
                return;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->sampler[slice][subslice]);

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
                           slice, subslice, instdone->row[slice][subslice]);
}
1290
/*
 * debugfs: report the current hangcheck bookkeeping - global reset/wedge
 * flags, the state of the hangcheck delayed work, and the per-engine
 * seqno/ACTHD/waiter/stall information.  For the render engine a fresh
 * INSTDONE snapshot is printed next to the value accumulated by
 * hangcheck so progress (or lack of it) can be compared.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Global error state first: wedged/reset-in-progress flags. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live engine state while holding a runtime-pm wakeref. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* Fresh INSTDONE snapshot for the render engine only. */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter rbtree under its lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			/* live snapshot vs. the hangcheck accumulator */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1384
Michel Thierry061d06a2017-06-20 10:57:49 +01001385static int i915_reset_info(struct seq_file *m, void *unused)
1386{
1387 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1388 struct i915_gpu_error *error = &dev_priv->gpu_error;
1389 struct intel_engine_cs *engine;
1390 enum intel_engine_id id;
1391
1392 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1393
1394 for_each_engine(engine, dev_priv, id) {
1395 seq_printf(m, "%s = %u\n", engine->name,
1396 i915_reset_engine_count(error, engine));
1397 }
1398
1399 return 0;
1400}
1401
/*
 * Ironlake render C-state (DRPC) report: decode the memory-mode control
 * register (MEMMODECTL), the render-standby control (RSTDBYCTL) and the
 * standby VIDs (CRSTANDVID) into a human-readable summary, including the
 * current render standby (RSx) state.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* RS1/RS2 VIDs live in the low/high 6-bit fields of CRSTANDVID */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1458
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001459static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001460{
Chris Wilson233ebf52017-03-23 10:19:44 +00001461 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001462 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001463 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001464
Chris Wilsond7a133d2017-09-07 14:44:41 +01001465 seq_printf(m, "user.bypass_count = %u\n",
1466 i915->uncore.user_forcewake.count);
1467
Chris Wilson233ebf52017-03-23 10:19:44 +00001468 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001469 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001470 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001471 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001472
1473 return 0;
1474}
1475
Mika Kuoppala13628772017-03-15 17:43:02 +02001476static void print_rc6_res(struct seq_file *m,
1477 const char *title,
1478 const i915_reg_t reg)
1479{
1480 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1481
1482 seq_printf(m, "%s %u (%llu us)\n",
1483 title, I915_READ(reg),
1484 intel_rc6_residency_us(dev_priv, reg));
1485}
1486
/*
 * Valleyview/Cherryview render C-state (DRPC) report: RC6 enable bits
 * from GEN6_RC_CONTROL, the render/media power-well status from
 * VLV_GTLC_PW_STATUS, and the RC6 residency counters, followed by the
 * forcewake domain summary.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Append the forcewake domain counts to the report. */
	return i915_forcewake_domains(m, NULL);
}
1508
/*
 * Gen6+ render C-state (DRPC) report: decode GEN6_RC_CONTROL and
 * GEN6_GT_CORE_STATUS, the gen9 power-gating registers, the RC6
 * residency counters, and (pre-gen8) the RC6 voltage IDs read from the
 * pcode, followed by the forcewake domain summary.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw (_FW) read without the forcewake dance; trace it manually. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs only exist up to gen7; pcode access needs pcu_lock. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 may still have the core power-gated */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs three 8-bit VIDs; decode each to mV */
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain counts to the report. */
	return i915_forcewake_domains(m, NULL);
}
1596
1597static int i915_drpc_info(struct seq_file *m, void *unused)
1598{
David Weinehall36cdd012016-08-22 13:59:31 +03001599 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001600 int err;
1601
1602 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001603
David Weinehall36cdd012016-08-22 13:59:31 +03001604 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001605 err = vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001606 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001607 err = gen6_drpc_info(m);
Ben Widawsky4d855292011-12-12 19:34:16 -08001608 else
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001609 err = ironlake_drpc_info(m);
1610
1611 intel_runtime_pm_put(dev_priv);
1612
1613 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001614}
1615
Daniel Vetter9a851782015-06-18 10:30:22 +02001616static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1617{
David Weinehall36cdd012016-08-22 13:59:31 +03001618 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001619
1620 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1621 dev_priv->fb_tracking.busy_bits);
1622
1623 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1624 dev_priv->fb_tracking.flip_bits);
1625
1626 return 0;
1627}
1628
/*
 * debugfs: report whether framebuffer compression (FBC) is active (and
 * if not, why), whether the FBC worker is scheduled, and - while FBC is
 * active - whether the hardware is currently compressing, using the
 * generation-specific status register and mask.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (fbc->work.scheduled)
		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
			   fbc->work.scheduled_vblank,
			   drm_crtc_vblank_count(&fbc->crtc->base));

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compression-status register differs per generation. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1673
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001674static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001675{
David Weinehall36cdd012016-08-22 13:59:31 +03001676 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001677
David Weinehall36cdd012016-08-22 13:59:31 +03001678 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001679 return -ENODEV;
1680
Rodrigo Vivida46f932014-08-01 02:04:45 -07001681 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001682
1683 return 0;
1684}
1685
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001686static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001687{
David Weinehall36cdd012016-08-22 13:59:31 +03001688 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001689 u32 reg;
1690
David Weinehall36cdd012016-08-22 13:59:31 +03001691 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001692 return -ENODEV;
1693
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001694 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001695
1696 reg = I915_READ(ILK_DPFC_CONTROL);
1697 dev_priv->fbc.false_color = val;
1698
1699 I915_WRITE(ILK_DPFC_CONTROL, val ?
1700 (reg | FBC_CTL_FALSE_COLOR) :
1701 (reg & ~FBC_CTL_FALSE_COLOR));
1702
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001703 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001704 return 0;
1705}
1706
/* debugfs fops pairing the false-colour get/set handlers, "%llu" format. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1710
Paulo Zanoni92d44622013-05-31 16:33:24 -03001711static int i915_ips_status(struct seq_file *m, void *unused)
1712{
David Weinehall36cdd012016-08-22 13:59:31 +03001713 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni92d44622013-05-31 16:33:24 -03001714
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001715 if (!HAS_IPS(dev_priv))
1716 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001717
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001718 intel_runtime_pm_get(dev_priv);
1719
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001720 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001721 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001722
David Weinehall36cdd012016-08-22 13:59:31 +03001723 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001724 seq_puts(m, "Currently: unknown\n");
1725 } else {
1726 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1727 seq_puts(m, "Currently: enabled\n");
1728 else
1729 seq_puts(m, "Currently: disabled\n");
1730 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001731
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001732 intel_runtime_pm_put(dev_priv);
1733
Paulo Zanoni92d44622013-05-31 16:33:24 -03001734 return 0;
1735}
1736
/*
 * debugfs: report whether self-refresh (SR) power saving is currently
 * enabled.  The enable bit lives in a different register on each
 * platform family; gen9+ has no global SR status bit (per-plane
 * watermarks would have to be inspected instead), so it reports
 * disabled.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1766
/*
 * debugfs: report the EMON (energy monitoring) readings on Ironlake
 * (gen5 only): GMCH temperature and chipset/GFX/total power estimates.
 * The sampling helpers are called under struct_mutex.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	/* EMON hardware exists only on gen5 (Ironlake). */
	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
1793
/*
 * debugfs: dump the GPU-to-CPU/ring frequency mapping table read from
 * the pcode, one row per GPU frequency step between the RPS min and max
 * frequencies.  Requires LLC (the table only exists on big-core parts);
 * pcode access is serialized by pcu_lock and a runtime-pm wakeref is
 * held throughout.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode replaces ia_freq in place with the table entry */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* low byte: CPU freq, high byte: ring freq, in 100 MHz units */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1841
Chris Wilson44834a62010-08-19 16:09:23 +01001842static int i915_opregion(struct seq_file *m, void *unused)
1843{
David Weinehall36cdd012016-08-22 13:59:31 +03001844 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1845 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001846 struct intel_opregion *opregion = &dev_priv->opregion;
1847 int ret;
1848
1849 ret = mutex_lock_interruptible(&dev->struct_mutex);
1850 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001851 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001852
Jani Nikula2455a8e2015-12-14 12:50:53 +02001853 if (opregion->header)
1854 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001855
1856 mutex_unlock(&dev->struct_mutex);
1857
Daniel Vetter0d38f002012-04-21 22:49:10 +02001858out:
Chris Wilson44834a62010-08-19 16:09:23 +01001859 return 0;
1860}
1861
Jani Nikulaada8f952015-12-15 13:17:12 +02001862static int i915_vbt(struct seq_file *m, void *unused)
1863{
David Weinehall36cdd012016-08-22 13:59:31 +03001864 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001865
1866 if (opregion->vbt)
1867 seq_write(m, opregion->vbt, opregion->vbt_size);
1868
1869 return 0;
1870}
1871
Chris Wilson37811fc2010-08-25 22:45:57 +01001872static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1873{
David Weinehall36cdd012016-08-22 13:59:31 +03001874 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1875 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301876 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001877 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001878 int ret;
1879
1880 ret = mutex_lock_interruptible(&dev->struct_mutex);
1881 if (ret)
1882 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001883
Daniel Vetter06957262015-08-10 13:34:08 +02001884#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001885 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001886 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001887
Chris Wilson25bcce92016-07-02 15:36:00 +01001888 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1889 fbdev_fb->base.width,
1890 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001891 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001892 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001893 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001894 drm_framebuffer_read_refcount(&fbdev_fb->base));
1895 describe_obj(m, fbdev_fb->obj);
1896 seq_putc(m, '\n');
1897 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001898#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001899
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001900 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001901 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301902 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1903 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001904 continue;
1905
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001906 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001907 fb->base.width,
1908 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001909 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001910 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001911 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001912 drm_framebuffer_read_refcount(&fb->base));
Chris Wilson05394f32010-11-08 19:18:58 +00001913 describe_obj(m, fb->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +01001914 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001915 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001916 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001917 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001918
1919 return 0;
1920}
1921
Chris Wilson7e37f882016-08-02 22:50:21 +01001922static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001923{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001924 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1925 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001926}
1927
Ben Widawskye76d3632011-03-19 18:14:29 -07001928static int i915_context_status(struct seq_file *m, void *unused)
1929{
David Weinehall36cdd012016-08-22 13:59:31 +03001930 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1931 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001932 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01001933 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05301934 enum intel_engine_id id;
Dave Gordonc3232b12016-03-23 18:19:53 +00001935 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001936
Daniel Vetterf3d28872014-05-29 23:23:08 +02001937 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001938 if (ret)
1939 return ret;
1940
Chris Wilson829a0af2017-06-20 12:05:45 +01001941 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilson5d1808e2016-04-28 09:56:51 +01001942 seq_printf(m, "HW context %u ", ctx->hw_id);
Chris Wilsonc84455b2016-08-15 10:49:08 +01001943 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001944 struct task_struct *task;
1945
Chris Wilsonc84455b2016-08-15 10:49:08 +01001946 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001947 if (task) {
1948 seq_printf(m, "(%s [%d]) ",
1949 task->comm, task->pid);
1950 put_task_struct(task);
1951 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001952 } else if (IS_ERR(ctx->file_priv)) {
1953 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001954 } else {
1955 seq_puts(m, "(kernel) ");
1956 }
1957
Chris Wilsonbca44d82016-05-24 14:53:41 +01001958 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1959 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001960
Akash Goel3b3f1652016-10-13 22:44:48 +05301961 for_each_engine(engine, dev_priv, id) {
Chris Wilsonbca44d82016-05-24 14:53:41 +01001962 struct intel_context *ce = &ctx->engine[engine->id];
1963
1964 seq_printf(m, "%s: ", engine->name);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001965 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001966 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001967 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001968 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001969 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001970 }
1971
Ben Widawskya33afea2013-09-17 21:12:45 -07001972 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001973 }
1974
Daniel Vetterf3d28872014-05-29 23:23:08 +02001975 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001976
1977 return 0;
1978}
1979
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001980static const char *swizzle_string(unsigned swizzle)
1981{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001982 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001983 case I915_BIT_6_SWIZZLE_NONE:
1984 return "none";
1985 case I915_BIT_6_SWIZZLE_9:
1986 return "bit9";
1987 case I915_BIT_6_SWIZZLE_9_10:
1988 return "bit9/bit10";
1989 case I915_BIT_6_SWIZZLE_9_11:
1990 return "bit9/bit11";
1991 case I915_BIT_6_SWIZZLE_9_10_11:
1992 return "bit9/bit10/bit11";
1993 case I915_BIT_6_SWIZZLE_9_17:
1994 return "bit9/bit17";
1995 case I915_BIT_6_SWIZZLE_9_10_17:
1996 return "bit9/bit10/bit17";
1997 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001998 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001999 }
2000
2001 return "bug";
2002}
2003
/*
 * Report the detected bit-6 swizzle pattern for X/Y tiling and dump the
 * generation-specific memory-controller registers that determine it.
 * Holds a runtime-pm wakeref across the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* gen3/4: DRAM channel/rank config registers */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM configuration and arbitration */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2050
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002051static int per_file_ctx(int id, void *ptr, void *data)
2052{
Chris Wilsone2efd132016-05-24 14:53:34 +01002053 struct i915_gem_context *ctx = ptr;
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002054 struct seq_file *m = data;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002055 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2056
2057 if (!ppgtt) {
2058 seq_printf(m, " no ppgtt for context %d\n",
2059 ctx->user_handle);
2060 return 0;
2061 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002062
Oscar Mateof83d6512014-05-22 14:13:38 +01002063 if (i915_gem_context_is_default(ctx))
2064 seq_puts(m, " default context:\n");
2065 else
Oscar Mateo821d66d2014-07-03 16:28:00 +01002066 seq_printf(m, " context %d:\n", ctx->user_handle);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002067 ppgtt->debug_dump(ppgtt, m);
2068
2069 return 0;
2070}
2071
David Weinehall36cdd012016-08-22 13:59:31 +03002072static void gen8_ppgtt_info(struct seq_file *m,
2073 struct drm_i915_private *dev_priv)
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002074{
Ben Widawsky77df6772013-11-02 21:07:30 -07002075 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
Akash Goel3b3f1652016-10-13 22:44:48 +05302076 struct intel_engine_cs *engine;
2077 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002078 int i;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002079
Ben Widawsky77df6772013-11-02 21:07:30 -07002080 if (!ppgtt)
2081 return;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002082
Akash Goel3b3f1652016-10-13 22:44:48 +05302083 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002084 seq_printf(m, "%s\n", engine->name);
Ben Widawsky77df6772013-11-02 21:07:30 -07002085 for (i = 0; i < 4; i++) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002086 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
Ben Widawsky77df6772013-11-02 21:07:30 -07002087 pdp <<= 32;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002088 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
Ville Syrjäläa2a5b152014-03-31 18:17:16 +03002089 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
Ben Widawsky77df6772013-11-02 21:07:30 -07002090 }
2091 }
2092}
2093
David Weinehall36cdd012016-08-22 13:59:31 +03002094static void gen6_ppgtt_info(struct seq_file *m,
2095 struct drm_i915_private *dev_priv)
Ben Widawsky77df6772013-11-02 21:07:30 -07002096{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002097 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302098 enum intel_engine_id id;
Ben Widawsky77df6772013-11-02 21:07:30 -07002099
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002100 if (IS_GEN6(dev_priv))
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002101 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2102
Akash Goel3b3f1652016-10-13 22:44:48 +05302103 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002104 seq_printf(m, "%s\n", engine->name);
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01002105 if (IS_GEN7(dev_priv))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002106 seq_printf(m, "GFX_MODE: 0x%08x\n",
2107 I915_READ(RING_MODE_GEN7(engine)));
2108 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2109 I915_READ(RING_PP_DIR_BASE(engine)));
2110 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2111 I915_READ(RING_PP_DIR_BASE_READ(engine)));
2112 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2113 I915_READ(RING_PP_DIR_DCLV(engine)));
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002114 }
2115 if (dev_priv->mm.aliasing_ppgtt) {
2116 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2117
Damien Lespiau267f0c92013-06-24 22:59:48 +01002118 seq_puts(m, "aliasing PPGTT:\n");
Mika Kuoppala44159dd2015-06-25 18:35:07 +03002119 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002120
Ben Widawsky87d60b62013-12-06 14:11:29 -08002121 ppgtt->debug_dump(ppgtt, m);
Daniel Vetterae6c4802014-08-06 15:04:53 +02002122 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002123
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002124 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
Ben Widawsky77df6772013-11-02 21:07:30 -07002125}
2126
/*
 * Top-level PPGTT dump: the generation-appropriate register dump followed
 * by each open DRM client's per-file contexts. Lock order is
 * filelist_mutex -> struct_mutex; errors unwind via the goto labels so
 * both locks and the runtime-pm wakeref are always released.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* Takes a task ref; dropped after printing the name. */
		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* Walk every context this file created. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2168
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002169static int count_irq_waiters(struct drm_i915_private *i915)
2170{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002171 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302172 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002173 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002174
Akash Goel3b3f1652016-10-13 22:44:48 +05302175 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002176 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002177
2178 return count;
2179}
2180
Chris Wilson7466c292016-08-15 09:49:33 +01002181static const char *rps_power_to_str(unsigned int power)
2182{
2183 static const char * const strings[] = {
2184 [LOW_POWER] = "low power",
2185 [BETWEEN] = "mixed",
2186 [HIGH_POWER] = "high power",
2187 };
2188
2189 if (power >= ARRAY_SIZE(strings) || !strings[power])
2190 return "unknown";
2191
2192 return strings[power];
2193}
2194
Chris Wilson1854d5c2015-04-07 16:20:32 +01002195static int i915_rps_boost_info(struct seq_file *m, void *data)
2196{
David Weinehall36cdd012016-08-22 13:59:31 +03002197 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2198 struct drm_device *dev = &dev_priv->drm;
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002199 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002200 struct drm_file *file;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002201
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002202 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
Chris Wilson28176ef2016-10-28 13:58:56 +01002203 seq_printf(m, "GPU busy? %s [%d requests]\n",
2204 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002205 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002206 seq_printf(m, "Boosts outstanding? %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002207 atomic_read(&rps->num_waiters));
Chris Wilson7466c292016-08-15 09:49:33 +01002208 seq_printf(m, "Frequency requested %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002209 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002210 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002211 intel_gpu_freq(dev_priv, rps->min_freq),
2212 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2213 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2214 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002215 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002216 intel_gpu_freq(dev_priv, rps->idle_freq),
2217 intel_gpu_freq(dev_priv, rps->efficient_freq),
2218 intel_gpu_freq(dev_priv, rps->boost_freq));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002219
2220 mutex_lock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002221 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2222 struct drm_i915_file_private *file_priv = file->driver_priv;
2223 struct task_struct *task;
2224
2225 rcu_read_lock();
2226 task = pid_task(file->pid, PIDTYPE_PID);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002227 seq_printf(m, "%s [%d]: %d boosts\n",
Chris Wilson1854d5c2015-04-07 16:20:32 +01002228 task ? task->comm : "<unknown>",
2229 task ? task->pid : -1,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002230 atomic_read(&file_priv->rps_client.boosts));
Chris Wilson1854d5c2015-04-07 16:20:32 +01002231 rcu_read_unlock();
2232 }
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002233 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002234 atomic_read(&rps->boosts));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002235 mutex_unlock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002236
Chris Wilson7466c292016-08-15 09:49:33 +01002237 if (INTEL_GEN(dev_priv) >= 6 &&
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002238 rps->enabled &&
Chris Wilson28176ef2016-10-28 13:58:56 +01002239 dev_priv->gt.active_requests) {
Chris Wilson7466c292016-08-15 09:49:33 +01002240 u32 rpup, rpupei;
2241 u32 rpdown, rpdownei;
2242
2243 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2244 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2245 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2246 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2247 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2248 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2249
2250 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002251 rps_power_to_str(rps->power));
Chris Wilson7466c292016-08-15 09:49:33 +01002252 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002253 rpup && rpupei ? 100 * rpup / rpupei : 0,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002254 rps->up_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002255 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002256 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002257 rps->down_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002258 } else {
2259 seq_puts(m, "\nRPS Autotuning inactive\n");
2260 }
2261
Chris Wilson8d3afd72015-05-21 21:01:47 +01002262 return 0;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002263}
2264
Ben Widawsky63573eb2013-07-04 11:02:07 -07002265static int i915_llc(struct seq_file *m, void *data)
2266{
David Weinehall36cdd012016-08-22 13:59:31 +03002267 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002268 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002269
David Weinehall36cdd012016-08-22 13:59:31 +03002270 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002271 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2272 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002273
2274 return 0;
2275}
2276
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002277static int i915_huc_load_status_info(struct seq_file *m, void *data)
2278{
2279 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002280 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002281
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002282 if (!HAS_HUC(dev_priv))
2283 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002284
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002285 p = drm_seq_file_printer(m);
2286 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002287
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302288 intel_runtime_pm_get(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002289 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302290 intel_runtime_pm_put(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002291
2292 return 0;
2293}
2294
Alex Daifdf5d352015-08-12 15:43:37 +01002295static int i915_guc_load_status_info(struct seq_file *m, void *data)
2296{
David Weinehall36cdd012016-08-22 13:59:31 +03002297 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002298 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002299 u32 tmp, i;
2300
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002301 if (!HAS_GUC(dev_priv))
2302 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002303
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002304 p = drm_seq_file_printer(m);
2305 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002306
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302307 intel_runtime_pm_get(dev_priv);
2308
Alex Daifdf5d352015-08-12 15:43:37 +01002309 tmp = I915_READ(GUC_STATUS);
2310
2311 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2312 seq_printf(m, "\tBootrom status = 0x%x\n",
2313 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2314 seq_printf(m, "\tuKernel status = 0x%x\n",
2315 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2316 seq_printf(m, "\tMIA Core status = 0x%x\n",
2317 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2318 seq_puts(m, "\nScratch registers:\n");
2319 for (i = 0; i < 16; i++)
2320 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2321
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302322 intel_runtime_pm_put(dev_priv);
2323
Alex Daifdf5d352015-08-12 15:43:37 +01002324 return 0;
2325}
2326
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002327static const char *
2328stringify_guc_log_type(enum guc_log_buffer_type type)
2329{
2330 switch (type) {
2331 case GUC_ISR_LOG_BUFFER:
2332 return "ISR";
2333 case GUC_DPC_LOG_BUFFER:
2334 return "DPC";
2335 case GUC_CRASH_DUMP_LOG_BUFFER:
2336 return "CRASH";
2337 default:
2338 MISSING_CASE(type);
2339 }
2340
2341 return "";
2342}
2343
Akash Goel5aa1ee42016-10-12 21:54:36 +05302344static void i915_guc_log_info(struct seq_file *m,
2345 struct drm_i915_private *dev_priv)
2346{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002347 struct intel_guc_log *log = &dev_priv->guc.log;
2348 enum guc_log_buffer_type type;
2349
2350 if (!intel_guc_log_relay_enabled(log)) {
2351 seq_puts(m, "GuC log relay disabled\n");
2352 return;
2353 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302354
Michał Winiarskidb557992018-03-19 10:53:43 +01002355 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302356
Michał Winiarski6a96be22018-03-19 10:53:42 +01002357 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002358 log->relay.full_count);
2359
2360 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2361 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2362 stringify_guc_log_type(type),
2363 log->stats[type].flush,
2364 log->stats[type].sampled_overflow);
2365 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302366}
2367
Dave Gordon8b417c22015-08-12 15:43:44 +01002368static void i915_guc_client_info(struct seq_file *m,
2369 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302370 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002371{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002372 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002373 enum intel_engine_id id;
Dave Gordon8b417c22015-08-12 15:43:44 +01002374 uint64_t tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002375
Oscar Mateob09935a2017-03-22 10:39:53 -07002376 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2377 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002378 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2379 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002380
Akash Goel3b3f1652016-10-13 22:44:48 +05302381 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002382 u64 submissions = client->submissions[id];
2383 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002384 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002385 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002386 }
2387 seq_printf(m, "\tTotal: %llu\n", tot);
2388}
2389
/*
 * Top-level GuC status: log stats always, then (only when GuC submission
 * is in use) the doorbell map and the execbuf/preempt clients.
 * Returns -ENODEV when the GuC is not used at all.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Log info is valid without submission; the rest is not. */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2421
Oscar Mateoa8b93702017-05-10 15:04:51 +00002422static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002423{
David Weinehall36cdd012016-08-22 13:59:31 +03002424 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002425 const struct intel_guc *guc = &dev_priv->guc;
2426 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302427 struct intel_guc_client *client = guc->execbuf_client;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002428 unsigned int tmp;
2429 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002430
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002431 if (!USES_GUC_SUBMISSION(dev_priv))
2432 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002433
Oscar Mateoa8b93702017-05-10 15:04:51 +00002434 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2435 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002436
Oscar Mateoa8b93702017-05-10 15:04:51 +00002437 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2438 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002439
Oscar Mateoa8b93702017-05-10 15:04:51 +00002440 seq_printf(m, "GuC stage descriptor %u:\n", index);
2441 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2442 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2443 seq_printf(m, "\tPriority: %d\n", desc->priority);
2444 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2445 seq_printf(m, "\tEngines used: 0x%x\n",
2446 desc->engines_used);
2447 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2448 desc->db_trigger_phy,
2449 desc->db_trigger_cpu,
2450 desc->db_trigger_uk);
2451 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2452 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002453 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002454 desc->wq_addr, desc->wq_size);
2455 seq_putc(m, '\n');
2456
2457 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2458 u32 guc_engine_id = engine->guc_id;
2459 struct guc_execlist_context *lrc =
2460 &desc->lrc[guc_engine_id];
2461
2462 seq_printf(m, "\t%s LRC:\n", engine->name);
2463 seq_printf(m, "\t\tContext desc: 0x%x\n",
2464 lrc->context_desc);
2465 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2466 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2467 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2468 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2469 seq_putc(m, '\n');
2470 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002471 }
2472
Oscar Mateoa8b93702017-05-10 15:04:51 +00002473 return 0;
2474}
2475
Alex Dai4c7e77f2015-08-12 15:43:40 +01002476static int i915_guc_log_dump(struct seq_file *m, void *data)
2477{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002478 struct drm_info_node *node = m->private;
2479 struct drm_i915_private *dev_priv = node_to_i915(node);
2480 bool dump_load_err = !!node->info_ent->data;
2481 struct drm_i915_gem_object *obj = NULL;
2482 u32 *log;
2483 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002484
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002485 if (!HAS_GUC(dev_priv))
2486 return -ENODEV;
2487
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002488 if (dump_load_err)
2489 obj = dev_priv->guc.load_err_log;
2490 else if (dev_priv->guc.log.vma)
2491 obj = dev_priv->guc.log.vma->obj;
2492
2493 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002494 return 0;
2495
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002496 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2497 if (IS_ERR(log)) {
2498 DRM_DEBUG("Failed to pin object\n");
2499 seq_puts(m, "(log data unaccessible)\n");
2500 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002501 }
2502
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002503 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2504 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2505 *(log + i), *(log + i + 1),
2506 *(log + i + 2), *(log + i + 3));
2507
Alex Dai4c7e77f2015-08-12 15:43:40 +01002508 seq_putc(m, '\n');
2509
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002510 i915_gem_object_unpin_map(obj);
2511
Alex Dai4c7e77f2015-08-12 15:43:40 +01002512 return 0;
2513}
2514
Michał Winiarski4977a282018-03-19 10:53:40 +01002515static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302516{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002517 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302518
Michał Winiarski86aa8242018-03-08 16:46:53 +01002519 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002520 return -ENODEV;
2521
Michał Winiarski4977a282018-03-19 10:53:40 +01002522 *val = intel_guc_log_level_get(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302523
2524 return 0;
2525}
2526
Michał Winiarski4977a282018-03-19 10:53:40 +01002527static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302528{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002529 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302530
Michał Winiarski86aa8242018-03-08 16:46:53 +01002531 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002532 return -ENODEV;
2533
Michał Winiarski4977a282018-03-19 10:53:40 +01002534 return intel_guc_log_level_set(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302535}
2536
/* Simple get/set attribute; "%lld\n" formats the level as signed decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2540
Michał Winiarski4977a282018-03-19 10:53:40 +01002541static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2542{
2543 struct drm_i915_private *dev_priv = inode->i_private;
2544
2545 if (!USES_GUC(dev_priv))
2546 return -ENODEV;
2547
2548 file->private_data = &dev_priv->guc.log;
2549
2550 return intel_guc_log_relay_open(&dev_priv->guc.log);
2551}
2552
2553static ssize_t
2554i915_guc_log_relay_write(struct file *filp,
2555 const char __user *ubuf,
2556 size_t cnt,
2557 loff_t *ppos)
2558{
2559 struct intel_guc_log *log = filp->private_data;
2560
2561 intel_guc_log_relay_flush(log);
2562
2563 return cnt;
2564}
2565
2566static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2567{
2568 struct drm_i915_private *dev_priv = inode->i_private;
2569
2570 intel_guc_log_relay_close(&dev_priv->guc.log);
2571
2572 return 0;
2573}
2574
/* File operations for the GuC log relay control debugfs node. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2581
Chris Wilsonb86bef202017-01-16 13:06:21 +00002582static const char *psr2_live_status(u32 val)
2583{
2584 static const char * const live_status[] = {
2585 "IDLE",
2586 "CAPTURE",
2587 "CAPTURE_FS",
2588 "SLEEP",
2589 "BUFON_FW",
2590 "ML_UP",
2591 "SU_STANDBY",
2592 "FAST_SLEEP",
2593 "DEEP_SLEEP",
2594 "BUF_ON",
2595 "TG_ON"
2596 };
2597
2598 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2599 if (val < ARRAY_SIZE(live_status))
2600 return live_status[val];
2601
2602 return "unknown";
2603}
2604
José Roberto de Souzad0bc8622018-04-25 14:23:33 -07002605static const char *psr_sink_status(u8 val)
2606{
2607 static const char * const sink_status[] = {
2608 "inactive",
2609 "transition to active, capture and display",
2610 "active, display from RFB",
2611 "active, capture and display on sink device timings",
2612 "transition to inactive, capture and display, timing re-sync",
2613 "reserved",
2614 "reserved",
2615 "sink internal error"
2616 };
2617
2618 val &= DP_PSR_SINK_STATE_MASK;
2619 if (val < ARRAY_SIZE(sink_status))
2620 return sink_status[val];
2621
2622 return "unknown";
2623}
2624
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002625static int i915_edp_psr_status(struct seq_file *m, void *data)
2626{
David Weinehall36cdd012016-08-22 13:59:31 +03002627 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Rodrigo Vivia031d702013-10-03 16:15:06 -03002628 u32 psrperf = 0;
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002629 u32 stat[3];
2630 enum pipe pipe;
Rodrigo Vivia031d702013-10-03 16:15:06 -03002631 bool enabled = false;
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002632 bool sink_support;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002633
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002634 if (!HAS_PSR(dev_priv))
2635 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002636
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002637 sink_support = dev_priv->psr.sink_support;
2638 seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2639 if (!sink_support)
2640 return 0;
2641
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002642 intel_runtime_pm_get(dev_priv);
2643
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002644 mutex_lock(&dev_priv->psr.lock);
Daniel Vetter2807cf62014-07-11 10:30:11 -07002645 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002646 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2647 dev_priv->psr.busy_frontbuffer_bits);
2648 seq_printf(m, "Re-enable work scheduled: %s\n",
2649 yesno(work_busy(&dev_priv->psr.work.work)));
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002650
Nagaraju, Vathsala7e3eb592016-12-09 23:42:09 +05302651 if (HAS_DDI(dev_priv)) {
José Roberto de Souza95f28d22018-03-28 15:30:42 -07002652 if (dev_priv->psr.psr2_enabled)
Nagaraju, Vathsala7e3eb592016-12-09 23:42:09 +05302653 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2654 else
2655 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2656 } else {
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002657 for_each_pipe(dev_priv, pipe) {
Chris Wilson9c870d02016-10-24 13:42:15 +01002658 enum transcoder cpu_transcoder =
2659 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2660 enum intel_display_power_domain power_domain;
2661
2662 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2663 if (!intel_display_power_get_if_enabled(dev_priv,
2664 power_domain))
2665 continue;
2666
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002667 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2668 VLV_EDP_PSR_CURR_STATE_MASK;
2669 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2670 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2671 enabled = true;
Chris Wilson9c870d02016-10-24 13:42:15 +01002672
2673 intel_display_power_put(dev_priv, power_domain);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002674 }
2675 }
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002676
2677 seq_printf(m, "Main link in standby mode: %s\n",
2678 yesno(dev_priv->psr.link_standby));
2679
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002680 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002681
David Weinehall36cdd012016-08-22 13:59:31 +03002682 if (!HAS_DDI(dev_priv))
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002683 for_each_pipe(dev_priv, pipe) {
2684 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2685 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2686 seq_printf(m, " pipe %c", pipe_name(pipe));
2687 }
2688 seq_puts(m, "\n");
2689
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002690 /*
2691 * VLV/CHV PSR has no kind of performance counter
2692 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2693 */
David Weinehall36cdd012016-08-22 13:59:31 +03002694 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Ville Syrjälä443a3892015-11-11 20:34:15 +02002695 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
Rodrigo Vivia031d702013-10-03 16:15:06 -03002696 EDP_PSR_PERF_CNT_MASK;
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002697
2698 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2699 }
José Roberto de Souza95f28d22018-03-28 15:30:42 -07002700 if (dev_priv->psr.psr2_enabled) {
Dhinakaran Pandiyan861023e2017-12-20 12:10:21 -08002701 u32 psr2 = I915_READ(EDP_PSR2_STATUS);
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302702
Dhinakaran Pandiyan861023e2017-12-20 12:10:21 -08002703 seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
Chris Wilsonb86bef202017-01-16 13:06:21 +00002704 psr2, psr2_live_status(psr2));
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302705 }
José Roberto de Souzad0bc8622018-04-25 14:23:33 -07002706
2707 if (dev_priv->psr.enabled) {
2708 struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
2709 u8 val;
2710
2711 if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
2712 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
2713 psr_sink_status(val));
2714 }
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002715 mutex_unlock(&dev_priv->psr.lock);
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002716
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002717 if (READ_ONCE(dev_priv->psr.debug)) {
2718 seq_printf(m, "Last attempted entry at: %lld\n",
2719 dev_priv->psr.last_entry_attempt);
2720 seq_printf(m, "Last exit at: %lld\n",
2721 dev_priv->psr.last_exit);
2722 }
2723
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002724 intel_runtime_pm_put(dev_priv);
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002725 return 0;
2726}
2727
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002728static int
2729i915_edp_psr_debug_set(void *data, u64 val)
2730{
2731 struct drm_i915_private *dev_priv = data;
2732
2733 if (!CAN_PSR(dev_priv))
2734 return -ENODEV;
2735
2736 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2737
2738 intel_runtime_pm_get(dev_priv);
2739 intel_psr_irq_control(dev_priv, !!val);
2740 intel_runtime_pm_put(dev_priv);
2741
2742 return 0;
2743}
2744
2745static int
2746i915_edp_psr_debug_get(void *data, u64 *val)
2747{
2748 struct drm_i915_private *dev_priv = data;
2749
2750 if (!CAN_PSR(dev_priv))
2751 return -ENODEV;
2752
2753 *val = READ_ONCE(dev_priv->psr.debug);
2754 return 0;
2755}
2756
/* Simple get/set attribute; "%llu\n" formats the flag as unsigned decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2760
/*
 * Read a 6-byte CRC of the scanned-out image from the first active eDP
 * sink and print it as hex.
 *
 * All modeset locks taken here are tracked in 'ctx' and dropped in one
 * go at 'out', so the 'continue' paths below do not leak locks — but
 * locks already acquired for a skipped connector stay held until then.
 *
 * NOTE(review): after a 'continue', the next loop iteration requests
 * connection_mutex again through the same acquire context; presumably
 * drm_modeset_lock() tolerates re-acquisition here — verify against the
 * drm_modeset_lock documentation.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		/* Sink CRC is only read from eDP panels. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* Deadlock with another modeset: back off and retry; any
		 * other error aborts the scan. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	/* No suitable (active eDP) connector was found. */
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2840
/*
 * Report the energy consumed by the graphics power plane in microjoules.
 *
 * The RAPL power-unit MSR supplies the energy-unit exponent; the raw
 * counter comes from MCH_SECP_NRG_STTS and is scaled before printing.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	/* The energy counter only exists on gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* Treat an unreadable MSR the same as missing hardware support. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of the RAPL unit MSR hold the energy-unit exponent;
	 * 'power' is then reused for the raw energy counter. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2867
/*
 * Summarise runtime-PM state: GPU idleness, IRQ state, the runtime-PM
 * usage count (when CONFIG_PM is built in) and the PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Note only; the remaining fields are printed regardless. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2892
Imre Deak1da51582013-11-25 17:15:35 +02002893static int i915_power_domain_info(struct seq_file *m, void *unused)
2894{
David Weinehall36cdd012016-08-22 13:59:31 +03002895 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002896 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2897 int i;
2898
2899 mutex_lock(&power_domains->lock);
2900
2901 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2902 for (i = 0; i < power_domains->power_well_count; i++) {
2903 struct i915_power_well *power_well;
2904 enum intel_display_power_domain power_domain;
2905
2906 power_well = &power_domains->power_wells[i];
2907 seq_printf(m, "%-25s %d\n", power_well->name,
2908 power_well->count);
2909
Joonas Lahtinen8385c2e2017-02-08 15:12:10 +02002910 for_each_power_domain(power_domain, power_well->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002911 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002912 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002913 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002914 }
2915
2916 mutex_unlock(&power_domains->lock);
2917
2918 return 0;
2919}
2920
/*
 * Report DMC/CSR firmware status: load state, firmware path, version,
 * and the DC-state transition counters on platforms that expose them.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload there is no version/counter info. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counter registers depend on platform and firmware revision. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2962
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002963static void intel_seq_print_mode(struct seq_file *m, int tabs,
2964 struct drm_display_mode *mode)
2965{
2966 int i;
2967
2968 for (i = 0; i < tabs; i++)
2969 seq_putc(m, '\t');
2970
2971 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2972 mode->base.id, mode->name,
2973 mode->vrefresh, mode->clock,
2974 mode->hdisplay, mode->hsync_start,
2975 mode->hsync_end, mode->htotal,
2976 mode->vdisplay, mode->vsync_start,
2977 mode->vsync_end, mode->vtotal,
2978 mode->type, mode->flags);
2979}
2980
2981static void intel_encoder_info(struct seq_file *m,
2982 struct intel_crtc *intel_crtc,
2983 struct intel_encoder *intel_encoder)
2984{
David Weinehall36cdd012016-08-22 13:59:31 +03002985 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2986 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002987 struct drm_crtc *crtc = &intel_crtc->base;
2988 struct intel_connector *intel_connector;
2989 struct drm_encoder *encoder;
2990
2991 encoder = &intel_encoder->base;
2992 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002993 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002994 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2995 struct drm_connector *connector = &intel_connector->base;
2996 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2997 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002998 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002999 drm_get_connector_status_name(connector->status));
3000 if (connector->status == connector_status_connected) {
3001 struct drm_display_mode *mode = &crtc->mode;
3002 seq_printf(m, ", mode:\n");
3003 intel_seq_print_mode(m, 2, mode);
3004 } else {
3005 seq_putc(m, '\n');
3006 }
3007 }
3008}
3009
3010static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3011{
David Weinehall36cdd012016-08-22 13:59:31 +03003012 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3013 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003014 struct drm_crtc *crtc = &intel_crtc->base;
3015 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003016 struct drm_plane_state *plane_state = crtc->primary->state;
3017 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003018
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003019 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07003020 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003021 fb->base.id, plane_state->src_x >> 16,
3022 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07003023 else
3024 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003025 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3026 intel_encoder_info(m, intel_crtc, intel_encoder);
3027}
3028
3029static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3030{
3031 struct drm_display_mode *mode = panel->fixed_mode;
3032
3033 seq_printf(m, "\tfixed mode:\n");
3034 intel_seq_print_mode(m, 2, mode);
3035}
3036
3037static void intel_dp_info(struct seq_file *m,
3038 struct intel_connector *intel_connector)
3039{
3040 struct intel_encoder *intel_encoder = intel_connector->encoder;
3041 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3042
3043 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03003044 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003045 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003046 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03003047
3048 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3049 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003050}
3051
Libin Yang9a148a92016-11-28 20:07:05 +08003052static void intel_dp_mst_info(struct seq_file *m,
3053 struct intel_connector *intel_connector)
3054{
3055 struct intel_encoder *intel_encoder = intel_connector->encoder;
3056 struct intel_dp_mst_encoder *intel_mst =
3057 enc_to_mst(&intel_encoder->base);
3058 struct intel_digital_port *intel_dig_port = intel_mst->primary;
3059 struct intel_dp *intel_dp = &intel_dig_port->dp;
3060 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3061 intel_connector->port);
3062
3063 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3064}
3065
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003066static void intel_hdmi_info(struct seq_file *m,
3067 struct intel_connector *intel_connector)
3068{
3069 struct intel_encoder *intel_encoder = intel_connector->encoder;
3070 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3071
Jani Nikula742f4912015-09-03 11:16:09 +03003072 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003073}
3074
/* LVDS connectors only have a fixed panel mode worth printing. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3080
/*
 * Print everything known about one connector: identity and status,
 * physical info when connected, output-type specific details, and the
 * full probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* Without an encoder there is no output-specific state to dump. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		/* MST connectors use a different encoder type than SST. */
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may be driven by a native HDMI or a DDI encoder. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3130
/* Short three-letter tag for a plane's type, used in per-plane output. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3148
/*
 * Format a plane rotation bitmask as text.
 *
 * NOTE(review): the result lives in a static buffer, so it is only
 * valid until the next call and is not safe against concurrent debugfs
 * readers; fixing that would require changing the interface to take a
 * caller-supplied buffer.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3168
/* Dump position, size, format and rotation for every plane on a CRTC. */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		/* A plane without a framebuffer has no format to name. */
		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/*
		 * src_* are 16.16 fixed point; the fractional part is
		 * printed in units of 1/10000 ((frac * 15625) >> 10 ==
		 * frac * 10000 / 65536).
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3211
/*
 * intel_scaler_info - dump pipe scaler state for one CRTC into debugfs.
 *
 * Prints the scaler count, the scaler_users bitmask and the active
 * scaler_id taken from the current CRTC state, followed by per-scaler
 * in_use/mode fields. Caller is expected to hold the relevant modeset
 * lock so that crtc->base.state is stable (see i915_display_info).
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3239
/*
 * i915_display_info - debugfs dump of all CRTC and connector state.
 *
 * Holds a runtime PM reference for the whole dump. Each CRTC is
 * inspected under its own per-CRTC modeset lock (taken uninterruptibly
 * with a NULL acquire context); the connector walk is done under
 * mode_config.mutex with the connector list iterator.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Per-CRTC lock keeps crtc->base.state stable while we read it. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		/* Cursor/scaler/plane details only make sense on an active pipe. */
		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3300
/*
 * i915_engine_info - debugfs dump of global GT state and every engine.
 *
 * Prints GT wakefulness (with its epoch counter), the global count of
 * active requests and the CS timestamp frequency, then delegates the
 * per-engine details to intel_engine_dump() via a seq_file drm_printer.
 * A runtime PM reference is held for the duration of the dump.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct drm_printer p;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   dev_priv->info.cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3325
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003326static int i915_rcs_topology(struct seq_file *m, void *unused)
3327{
3328 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3329 struct drm_printer p = drm_seq_file_printer(m);
3330
3331 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3332
3333 return 0;
3334}
3335
Chris Wilsonc5418a82017-10-13 21:26:19 +01003336static int i915_shrinker_info(struct seq_file *m, void *unused)
3337{
3338 struct drm_i915_private *i915 = node_to_i915(m->private);
3339
3340 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3341 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3342
3343 return 0;
3344}
3345
/*
 * i915_shared_dplls_info - debugfs dump of every shared DPLL.
 *
 * For each PLL: its name/id, the crtc_mask and active mask from the
 * software PLL state, whether it is on, and the tracked hardware
 * register values (dpll, dpll_md, fp0, fp1, wrpll). All modeset locks
 * are held so the PLL state cannot change mid-dump.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3372
/*
 * i915_wa_registers - debugfs dump verifying applied workaround registers.
 *
 * For each recorded workaround, reads the register back over MMIO
 * (I915_READ, hence the runtime PM reference) and reports OK/FAIL
 * depending on whether the read value matches the expected value under
 * the workaround's mask.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_workarounds *workarounds = &dev_priv->workarounds;
	int i;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
	for (i = 0; i < workarounds->count; ++i) {
		i915_reg_t addr;
		u32 mask, value, read;
		bool ok;

		addr = workarounds->reg[i].addr;
		mask = workarounds->reg[i].mask;
		value = workarounds->reg[i].value;
		read = I915_READ(addr);
		/* Only the masked bits are required to stick. */
		ok = (value & mask) == (read & mask);
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3400
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303401static int i915_ipc_status_show(struct seq_file *m, void *data)
3402{
3403 struct drm_i915_private *dev_priv = m->private;
3404
3405 seq_printf(m, "Isochronous Priority Control: %s\n",
3406 yesno(dev_priv->ipc_enabled));
3407 return 0;
3408}
3409
3410static int i915_ipc_status_open(struct inode *inode, struct file *file)
3411{
3412 struct drm_i915_private *dev_priv = inode->i_private;
3413
3414 if (!HAS_IPC(dev_priv))
3415 return -ENODEV;
3416
3417 return single_open(file, i915_ipc_status_show, dev_priv);
3418}
3419
/*
 * i915_ipc_status_write - debugfs write handler toggling IPC.
 *
 * Parses a boolean from userspace; on the disabled->enabled transition
 * logs that watermarks will only be correct after the next commit.
 * Marks the BIOS-programmed watermarks as untrusted, records the new
 * IPC state and applies it via intel_enable_ipc(), all under a runtime
 * PM reference. Returns @len on success or the kstrtobool error.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* Force a full watermark recomputation on the next commit. */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3442
/* debugfs ops for i915_ipc_status: seq_file read, boolean write toggle. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3451
/*
 * i915_ddb_info - debugfs dump of the SKL+ display data buffer layout.
 *
 * Only valid on gen9+ (returns -ENODEV otherwise). Prints, per pipe,
 * the start/end/size of the DDB allocation for every universal plane
 * plus the cursor plane, reading the cached hardware state under all
 * modeset locks.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *ddb;
	struct skl_ddb_entry *entry;
	enum pipe pipe;
	int plane;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	ddb = &dev_priv->wm.skl_hw.ddb;

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_pipe(dev_priv, pipe) {
		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_universal_plane(dev_priv, pipe, plane) {
			entry = &ddb->plane[pipe][plane];
			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &ddb->plane[pipe][PLANE_CURSOR];
		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3489
/*
 * drrs_status_per_crtc - dump DRRS state for one CRTC into debugfs.
 *
 * Lists the connectors currently driven by @intel_crtc, the VBT DRRS
 * capability, and — when the current CRTC state has DRRS — the live
 * refresh-rate state (high/low RR and the corresponding vrefresh)
 * under drrs->mutex. Note the early returns below drop the mutex
 * before bailing out.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only list connectors attached to this CRTC. */
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3564
3565static int i915_drrs_status(struct seq_file *m, void *unused)
3566{
David Weinehall36cdd012016-08-22 13:59:31 +03003567 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3568 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303569 struct intel_crtc *intel_crtc;
3570 int active_crtc_cnt = 0;
3571
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003572 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303573 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003574 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303575 active_crtc_cnt++;
3576 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3577
3578 drrs_status_per_crtc(m, dev, intel_crtc);
3579 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303580 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003581 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303582
3583 if (!active_crtc_cnt)
3584 seq_puts(m, "No active crtc found\n");
3585
3586 return 0;
3587}
3588
/*
 * i915_dp_mst_info - debugfs dump of DP MST topologies.
 *
 * Iterates all DisplayPort connectors, skips MST virtual encoders and
 * ports without MST capability, then prints the source port name and
 * the full MST topology via drm_dp_mst_dump_topology().
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(connector);
		/* MST connectors have a DP_MST encoder; we want the real port. */
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3619
/*
 * i915_displayport_test_active_write - debugfs write arming DP compliance.
 *
 * Copies the user buffer (NUL-terminated via memdup_user_nul), parses
 * an integer, and for every connected non-MST DisplayPort connector
 * sets compliance.test_active — only an exact value of 1 arms the
 * compliance path; anything else disarms it. A parse error breaks out
 * of the connector walk and is returned after cleanup.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3678
/*
 * i915_displayport_test_active_show - debugfs read of compliance arming.
 *
 * Emits "1" or "0" per connected non-MST DisplayPort connector
 * depending on compliance.test_active, and "0" for DP connectors
 * that are not connected.
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3712
3713static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003714 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003715{
David Weinehall36cdd012016-08-22 13:59:31 +03003716 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003717 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003718}
3719
/* debugfs ops for i915_displayport_test_active: seq_file read + arm/disarm write. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3728
/*
 * i915_displayport_test_data_show - debugfs read of DP compliance test data.
 *
 * For each connected non-MST DisplayPort connector, prints the stored
 * compliance payload according to the pending test type: the EDID
 * checksum for EDID-read tests, or hdisplay/vdisplay/bpc for video
 * pattern tests. Unconnected DP connectors print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003772
/*
 * i915_displayport_test_type_show - debugfs read of the pending DP
 * compliance test type, printed as hex per connected non-MST
 * DisplayPort connector ("0" when not connected).
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003804
/*
 * wm_latency_show - print a watermark latency table into debugfs.
 *
 * @wm holds up to 8 latency levels; the platform determines how many
 * are valid (3 on CHV/G4X, 1 on VLV, ilk_wm_max_level()+1 elsewhere).
 * Values are scaled for display: gen9+/vlv/chv/g4x store latencies in
 * microseconds, older platforms store WM1+ in 0.5us units, so the
 * value is normalized to tenths of a microsecond before printing.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3844
3845static int pri_wm_latency_show(struct seq_file *m, void *data)
3846{
David Weinehall36cdd012016-08-22 13:59:31 +03003847 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003848 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003849
David Weinehall36cdd012016-08-22 13:59:31 +03003850 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003851 latencies = dev_priv->wm.skl_latency;
3852 else
David Weinehall36cdd012016-08-22 13:59:31 +03003853 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003854
3855 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003856
3857 return 0;
3858}
3859
3860static int spr_wm_latency_show(struct seq_file *m, void *data)
3861{
David Weinehall36cdd012016-08-22 13:59:31 +03003862 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003863 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003864
David Weinehall36cdd012016-08-22 13:59:31 +03003865 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003866 latencies = dev_priv->wm.skl_latency;
3867 else
David Weinehall36cdd012016-08-22 13:59:31 +03003868 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003869
3870 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003871
3872 return 0;
3873}
3874
3875static int cur_wm_latency_show(struct seq_file *m, void *data)
3876{
David Weinehall36cdd012016-08-22 13:59:31 +03003877 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003878 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003879
David Weinehall36cdd012016-08-22 13:59:31 +03003880 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003881 latencies = dev_priv->wm.skl_latency;
3882 else
David Weinehall36cdd012016-08-22 13:59:31 +03003883 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003884
3885 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003886
3887 return 0;
3888}
3889
3890static int pri_wm_latency_open(struct inode *inode, struct file *file)
3891{
David Weinehall36cdd012016-08-22 13:59:31 +03003892 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003893
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003894 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003895 return -ENODEV;
3896
David Weinehall36cdd012016-08-22 13:59:31 +03003897 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003898}
3899
3900static int spr_wm_latency_open(struct inode *inode, struct file *file)
3901{
David Weinehall36cdd012016-08-22 13:59:31 +03003902 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003903
David Weinehall36cdd012016-08-22 13:59:31 +03003904 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003905 return -ENODEV;
3906
David Weinehall36cdd012016-08-22 13:59:31 +03003907 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003908}
3909
3910static int cur_wm_latency_open(struct inode *inode, struct file *file)
3911{
David Weinehall36cdd012016-08-22 13:59:31 +03003912 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003913
David Weinehall36cdd012016-08-22 13:59:31 +03003914 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003915 return -ENODEV;
3916
David Weinehall36cdd012016-08-22 13:59:31 +03003917 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003918}
3919
/*
 * Common write handler for the {pri,spr,cur}_wm_latency debugfs files.
 *
 * Parses up to 8 space-separated u16 latency values from userspace and
 * stores them into @wm under the modeset locks.  The number of values
 * that must be supplied equals the platform's watermark level count;
 * anything else is rejected with -EINVAL.
 *
 * Returns the number of bytes consumed on success, or a negative errno.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Per-platform watermark level count; must match the user input. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reserve one byte for the NUL terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Exactly num_levels values must have been parsed. */
	if (ret != num_levels)
		return -EINVAL;

	/* Take the modeset locks while mutating the live latency table. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3964
3965
3966static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3967 size_t len, loff_t *offp)
3968{
3969 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003970 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003971 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003972
David Weinehall36cdd012016-08-22 13:59:31 +03003973 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003974 latencies = dev_priv->wm.skl_latency;
3975 else
David Weinehall36cdd012016-08-22 13:59:31 +03003976 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003977
3978 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003979}
3980
3981static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3982 size_t len, loff_t *offp)
3983{
3984 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003985 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003986 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003987
David Weinehall36cdd012016-08-22 13:59:31 +03003988 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003989 latencies = dev_priv->wm.skl_latency;
3990 else
David Weinehall36cdd012016-08-22 13:59:31 +03003991 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003992
3993 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003994}
3995
3996static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3997 size_t len, loff_t *offp)
3998{
3999 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03004000 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004001 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02004002
David Weinehall36cdd012016-08-22 13:59:31 +03004003 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00004004 latencies = dev_priv->wm.skl_latency;
4005 else
David Weinehall36cdd012016-08-22 13:59:31 +03004006 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004007
4008 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02004009}
4010
/* debugfs file operations for the primary-plane watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* debugfs file operations for the sprite-plane watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* debugfs file operations for the cursor-plane watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4037
Kees Cook647416f2013-03-10 14:10:06 -07004038static int
4039i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004040{
David Weinehall36cdd012016-08-22 13:59:31 +03004041 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004042
Chris Wilsond98c52c2016-04-13 17:35:05 +01004043 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004044
Kees Cook647416f2013-03-10 14:10:06 -07004045 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004046}
4047
/*
 * Write side of the i915_wedged debugfs attribute: manually declares the
 * engines in the @val mask hung and kicks off error handling, then waits
 * for the reset handoff to complete before returning.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/*
	 * Mark each selected engine as stalled at its current seqno so
	 * hangcheck/reset treats it as hung.
	 */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Block until the reset handoff flag is cleared by the handler. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}
4080
/* Simple u64 attribute wiring get/set above to the i915_wedged file. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004084
/*
 * Common helper for the missed/test irq debugfs attributes: waits for the
 * GPU to idle under struct_mutex, stores @val into the fault-injection
 * mask @irq, then flushes the idle worker so the irq state is disarmed.
 *
 * Returns 0 on success or a negative errno (lock interrupted / wait
 * failed), leaving @irq untouched on error.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* Only update the mask once the GPU has quiesced. */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4114
4115static int
Chris Wilson094f9a52013-09-25 17:34:55 +01004116i915_ring_missed_irq_get(void *data, u64 *val)
4117{
David Weinehall36cdd012016-08-22 13:59:31 +03004118 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004119
4120 *val = dev_priv->gpu_error.missed_irq_rings;
4121 return 0;
4122}
4123
4124static int
4125i915_ring_missed_irq_set(void *data, u64 val)
4126{
Chris Wilson64486ae2017-03-07 15:59:08 +00004127 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004128
Chris Wilson64486ae2017-03-07 15:59:08 +00004129 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004130}
4131
/* Hex-formatted u64 attribute for the missed-irq ring mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4135
4136static int
4137i915_ring_test_irq_get(void *data, u64 *val)
4138{
David Weinehall36cdd012016-08-22 13:59:31 +03004139 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004140
4141 *val = dev_priv->gpu_error.test_irq_rings;
4142
4143 return 0;
4144}
4145
/*
 * Write side of i915_ring_test_irq: restrict the requested mask to
 * engines that actually exist, then update it via fault_irq_set().
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}
4156
/* Hex-formatted u64 attribute for the test-irq ring mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4160
/*
 * Flag bits accepted by the i915_drop_caches debugfs file; writing a mask
 * of these triggers the corresponding cleanup in i915_drop_caches_set().
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
Kees Cook647416f2013-03-10 14:10:06 -07004175static int
4176i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004177{
Kees Cook647416f2013-03-10 14:10:06 -07004178 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004179
Kees Cook647416f2013-03-10 14:10:06 -07004180 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004181}
4182
/*
 * Write side of i915_drop_caches: performs the cleanup actions selected
 * by the DROP_* bits in @val — waiting for the GPU, retiring requests,
 * shrinking bound/unbound objects, draining the idle worker and freeing
 * deferred objects.  Returns 0 or the first error encountered.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* Shrinking runs under the fs-reclaim annotation for lockdep. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}
4230
/* Hex-formatted u64 attribute exposing the drop-caches interface. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004234
/*
 * Read side of i915_cache_sharing: reports the MBC snoop-control cache
 * sharing policy field (gen6/gen7 only) from GEN6_MBCUNIT_SNPCR.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Hold a runtime-pm reference across the register read. */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4254
/*
 * Write side of i915_cache_sharing: sets the MBC snoop-control cache
 * sharing policy field (gen6/gen7 only).  Valid values are 0-3.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	/* Hold a runtime-pm reference across the read-modify-write. */
	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
4279
/* Decimal u64 attribute exposing the cache-sharing policy. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004283
/*
 * Read Cherryview subslice power-gating signal registers and fill @sseu
 * with the currently powered-up slice/subslice/EU topology.  CHV has a
 * single slice with up to two subslices; each PG_ENABLE bit that is
 * clear contributes a pair of enabled EUs.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each clear PG_ENABLE bit means 2 EUs are powered. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4316
/*
 * Read the gen10 slice/subslice/EU power-gating ACK registers and fill
 * @sseu with the hardware's currently enabled topology.  Each EU ACK bit
 * represents a pair of EUs.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* eu_mask[0] covers even subslices, eu_mask[1] odd ones. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit accounts for two EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4372
/*
 * Read the gen9 slice/subslice/EU power-gating ACK registers and fill
 * @sseu with the hardware's currently enabled topology.  On gen9 LP
 * (broxton-class) the subslice mask is discovered per-subslice from the
 * ACK bits; on gen9 BC it is taken from the static device info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* eu_mask[0] covers even subslices, eu_mask[1] odd ones. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit accounts for two EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4428
/*
 * Fill @sseu with Broadwell's enabled topology.  The enabled slice mask
 * comes from GEN8_GT_SLICE_INFO; subslice/EU counts are taken from the
 * static device info and then reduced by the fused-off 7-EU subslices.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4456
/*
 * Print a slice/subslice/EU summary into @m.  @is_available_info selects
 * the "Available" (static capability) vs "Enabled" (runtime status)
 * labelling; the capability-only fields (pooled EU, power gating) are
 * printed only for the "Available" variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	/* The remaining fields describe static capabilities only. */
	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4493
/*
 * debugfs entry point: prints both the static SSEU capabilities and the
 * live hardware status (per-platform readback) for gen8+ devices.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the max topology limits from the static device info. */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* Keep the device awake while poking the status registers. */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4530
Ben Widawsky6d794d42011-04-25 11:25:56 -07004531static int i915_forcewake_open(struct inode *inode, struct file *file)
4532{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004533 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004534
Chris Wilsond7a133d2017-09-07 14:44:41 +01004535 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004536 return 0;
4537
Chris Wilsond7a133d2017-09-07 14:44:41 +01004538 intel_runtime_pm_get(i915);
4539 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004540
4541 return 0;
4542}
4543
Ben Widawskyc43b5632012-04-16 14:07:40 -07004544static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004545{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004546 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004547
Chris Wilsond7a133d2017-09-07 14:44:41 +01004548 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004549 return 0;
4550
Chris Wilsond7a133d2017-09-07 14:44:41 +01004551 intel_uncore_forcewake_user_put(i915);
4552 intel_runtime_pm_put(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004553
4554 return 0;
4555}
4556
/*
 * i915_forcewake_user: open/close pairs map to forcewake get/put; there is
 * no read or write — merely keeping the file open holds the wakeref.
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4562
Lyude317eaa92017-02-03 21:18:25 -05004563static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4564{
4565 struct drm_i915_private *dev_priv = m->private;
4566 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4567
4568 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4569 seq_printf(m, "Detected: %s\n",
4570 yesno(delayed_work_pending(&hotplug->reenable_work)));
4571
4572 return 0;
4573}
4574
4575static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4576 const char __user *ubuf, size_t len,
4577 loff_t *offp)
4578{
4579 struct seq_file *m = file->private_data;
4580 struct drm_i915_private *dev_priv = m->private;
4581 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4582 unsigned int new_threshold;
4583 int i;
4584 char *newline;
4585 char tmp[16];
4586
4587 if (len >= sizeof(tmp))
4588 return -EINVAL;
4589
4590 if (copy_from_user(tmp, ubuf, len))
4591 return -EFAULT;
4592
4593 tmp[len] = '\0';
4594
4595 /* Strip newline, if any */
4596 newline = strchr(tmp, '\n');
4597 if (newline)
4598 *newline = '\0';
4599
4600 if (strcmp(tmp, "reset") == 0)
4601 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4602 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4603 return -EINVAL;
4604
4605 if (new_threshold > 0)
4606 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4607 new_threshold);
4608 else
4609 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4610
4611 spin_lock_irq(&dev_priv->irq_lock);
4612 hotplug->hpd_storm_threshold = new_threshold;
4613 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4614 for_each_hpd_pin(i)
4615 hotplug->stats[i].count = 0;
4616 spin_unlock_irq(&dev_priv->irq_lock);
4617
4618 /* Re-enable hpd immediately if we were in an irq storm */
4619 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4620
4621 return len;
4622}
4623
/* Bind the seq_file show callback, passing the device as private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4628
/*
 * i915_hpd_storm_ctl: reads go through the standard seq_file machinery,
 * writes are handled by the custom threshold parser above.
 */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4637
C, Ramalingam35954e82017-11-08 00:08:23 +05304638static int i915_drrs_ctl_set(void *data, u64 val)
4639{
4640 struct drm_i915_private *dev_priv = data;
4641 struct drm_device *dev = &dev_priv->drm;
4642 struct intel_crtc *intel_crtc;
4643 struct intel_encoder *encoder;
4644 struct intel_dp *intel_dp;
4645
4646 if (INTEL_GEN(dev_priv) < 7)
4647 return -ENODEV;
4648
4649 drm_modeset_lock_all(dev);
4650 for_each_intel_crtc(dev, intel_crtc) {
4651 if (!intel_crtc->base.state->active ||
4652 !intel_crtc->config->has_drrs)
4653 continue;
4654
4655 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4656 if (encoder->type != INTEL_OUTPUT_EDP)
4657 continue;
4658
4659 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4660 val ? "en" : "dis", val);
4661
4662 intel_dp = enc_to_intel_dp(&encoder->base);
4663 if (val)
4664 intel_edp_drrs_enable(intel_dp,
4665 intel_crtc->config);
4666 else
4667 intel_edp_drrs_disable(intel_dp,
4668 intel_crtc->config);
4669 }
4670 }
4671 drm_modeset_unlock_all(dev);
4672
4673 return 0;
4674}
4675
4676DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4677
/*
 * Re-arm FIFO underrun reporting on all pipes after userspace writes a
 * truthy value.  For each crtc we take its modeset lock, wait for any
 * in-flight atomic commit to fully complete (hw_done then flip_done) so we
 * don't race with the commit's own underrun handling, and re-arm reporting
 * if the crtc is active.  FBC underrun state is reset as a final step.
 *
 * Returns the byte count consumed on success, or a negative errno (input
 * parse failure, interrupted wait, or lock contention).
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* Writing "false" is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Let any pending commit on this crtc finish first. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Unlock before acting on ret so the lock never leaks. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4731
/* Write-only file: a truthy write re-arms FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4738
/*
 * Read-only debugfs files, one seq_file show callback each.  Registered in
 * bulk by drm_debugfs_create_files() from i915_debugfs_register().  The
 * optional fourth member is passed to the callback as driver data (used by
 * i915_guc_load_err_log_dump to select the error log).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004790
/*
 * Writable debugfs files, each with its own file_operations.  Created one
 * at a time in i915_debugfs_register().  The error-state files are only
 * built when error capture is configured in.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4821
Chris Wilson1dac8912016-06-24 14:00:17 +01004822int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004823{
Chris Wilson91c8a322016-07-05 10:40:23 +01004824 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004825 struct dentry *ent;
Daniel Vetter34b96742013-07-04 20:49:44 +02004826 int ret, i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004827
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004828 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4829 minor->debugfs_root, to_i915(minor->dev),
4830 &i915_forcewake_fops);
4831 if (!ent)
4832 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004833
Tomeu Vizoso731035f2016-12-12 13:29:48 +01004834 ret = intel_pipe_crc_create(minor);
4835 if (ret)
4836 return ret;
Damien Lespiau07144422013-10-15 18:55:40 +01004837
Daniel Vetter34b96742013-07-04 20:49:44 +02004838 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004839 ent = debugfs_create_file(i915_debugfs_files[i].name,
4840 S_IRUGO | S_IWUSR,
4841 minor->debugfs_root,
4842 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004843 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004844 if (!ent)
4845 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004846 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004847
Ben Gamari27c202a2009-07-01 22:26:52 -04004848 return drm_debugfs_create_files(i915_debugfs_list,
4849 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004850 minor->debugfs_root, minor);
4851}
4852
/* Describes one range of DPCD registers dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4863
/* DPCD register ranges of interest; dumped in order by i915_dpcd_show(). */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4876
4877static int i915_dpcd_show(struct seq_file *m, void *data)
4878{
4879 struct drm_connector *connector = m->private;
4880 struct intel_dp *intel_dp =
4881 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4882 uint8_t buf[16];
4883 ssize_t err;
4884 int i;
4885
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004886 if (connector->status != connector_status_connected)
4887 return -ENODEV;
4888
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004889 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4890 const struct dpcd_block *b = &i915_dpcd_debug[i];
4891 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4892
4893 if (b->edp &&
4894 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4895 continue;
4896
4897 /* low tech for now */
4898 if (WARN_ON(size > sizeof(buf)))
4899 continue;
4900
4901 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4902 if (err <= 0) {
4903 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4904 size, b->offset, err);
4905 continue;
4906 }
4907
4908 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004909 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004910
4911 return 0;
4912}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004913DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004914
David Weinehallecbd6782016-08-23 12:23:56 +03004915static int i915_panel_show(struct seq_file *m, void *data)
4916{
4917 struct drm_connector *connector = m->private;
4918 struct intel_dp *intel_dp =
4919 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4920
4921 if (connector->status != connector_status_connected)
4922 return -ENODEV;
4923
4924 seq_printf(m, "Panel power up delay: %d\n",
4925 intel_dp->panel_power_up_delay);
4926 seq_printf(m, "Panel power down delay: %d\n",
4927 intel_dp->panel_power_down_delay);
4928 seq_printf(m, "Backlight on delay: %d\n",
4929 intel_dp->backlight_on_delay);
4930 seq_printf(m, "Backlight off delay: %d\n",
4931 intel_dp->backlight_off_delay);
4932
4933 return 0;
4934}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004935DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004936
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004937/**
4938 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4939 * @connector: pointer to a registered drm_connector
4940 *
4941 * Cleanup will be done by drm_connector_unregister() through a call to
4942 * drm_debugfs_connector_remove().
4943 *
4944 * Returns 0 on success, negative error codes on error.
4945 */
4946int i915_debugfs_connector_add(struct drm_connector *connector)
4947{
4948 struct dentry *root = connector->debugfs_entry;
4949
4950 /* The connector must have been registered beforehands. */
4951 if (!root)
4952 return -ENODEV;
4953
4954 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4955 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004956 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4957 connector, &i915_dpcd_fops);
4958
4959 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4960 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4961 connector, &i915_panel_fops);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004962
4963 return 0;
4964}