blob: 85911bc0b7036062242efb7045dbb45b0e1c8200 [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
David Weinehall36cdd012016-08-22 13:59:31 +030035static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36{
37 return to_i915(node->minor->dev);
38}
39
/*
 * debugfs "i915_capabilities": dump the static device description —
 * generation, platform, PCH type, device-info flags, runtime info,
 * driver caps and the current module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050060
/* '*' when the object is still busy on the GPU, blank otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
Imre Deaka7363de2016-05-12 16:18:52 +030066static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010067{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010068 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069}
70
Imre Deaka7363de2016-05-12 16:18:52 +030071static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000072{
Chris Wilson3e510a82016-08-05 10:14:23 +010073 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040074 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010075 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040078 }
Chris Wilsona6172a82009-02-11 14:26:38 +000079}
80
Imre Deaka7363de2016-05-12 16:18:52 +030081static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070082{
Chris Wilsona65adaf2017-10-09 09:43:57 +010083 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010084}
85
Imre Deaka7363de2016-05-12 16:18:52 +030086static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010088 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070089}
90
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010091static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
Chris Wilsone2189dd2017-12-07 21:14:07 +000096 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010098 size += vma->node.size;
99 }
100
101 return size;
102}
103
Matthew Auld7393b7e2017-10-06 23:18:28 +0100104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * Print a single-line (no trailing newline) description of a GEM object:
 * flag characters, size, read/write domains, cache level, per-VMA binding
 * details (offset/size/page sizes, GGTT view type, fence), stolen-memory
 * offset, last-write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex — the vma_list walk below relies on it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* %pK: hashed kernel pointer unless the reader is privileged. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that has GTT space allocated. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* Stored in pages; print in bytes. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
Chris Wilsone637d2c2017-03-16 13:19:57 +0000222static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100223{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100228
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234}
235
/*
 * debugfs "i915_gem_stolen": list every GEM object backed by stolen
 * memory, sorted by stolen start offset, plus totals.
 *
 * The object lists are walked under mm.obj_lock with pointers snapshotted
 * into a kvmalloc'ed array (describe_obj() cannot be called under the
 * spinlock); the array is then sorted and printed under struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Unlocked snapshot of the object count; bounds the array only. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		/* The count may have grown since the snapshot; stop early. */
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects contribute no GTT size. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Accumulator for per-client (or kernel) GEM object statistics,
 * filled in by per_file_stats(); all sizes are in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* ppgtt owner filter; NULL for kernel-wide stats */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* total size / size with no binding */
	u64 global, shared;	/* GGTT-bound size / named or dma-buf exported size */
	u64 active, inactive;	/* VMA size busy on GPU / idle */
};
306
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 *
 * @id is unused (0 when called directly), @ptr is the object, @data the
 * accumulator.  Always returns 0 so the idr walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/*
			 * Skip ppgtt VMAs belonging to other clients so a
			 * shared object is only counted for its owner.
			 */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for a struct file_stats, or nothing when no
 * objects were counted.  Macro rather than a function so callers can
 * pass the struct by name; note 'stats' is evaluated multiple times,
 * so only pass a plain variable, never an expression with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800356
357static void print_batch_pool_stats(struct seq_file *m,
358 struct drm_i915_private *dev_priv)
359{
360 struct drm_i915_gem_object *obj;
361 struct file_stats stats;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000362 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530363 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000364 int j;
Brad Volkin493018d2014-12-11 12:13:08 -0800365
366 memset(&stats, 0, sizeof(stats));
367
Akash Goel3b3f1652016-10-13 22:44:48 +0530368 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000369 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100370 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000371 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100372 batch_pool_link)
373 per_file_stats(0, obj, &stats);
374 }
Chris Wilson06fbca72015-04-07 16:20:36 +0100375 }
Brad Volkin493018d2014-12-11 12:13:08 -0800376
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100377 print_file_stats(m, "[k]batch pool", stats);
Brad Volkin493018d2014-12-11 12:13:08 -0800378}
379
/*
 * idr_for_each() callback: fold one GEM context's per-engine state and
 * ring objects into a struct file_stats (@data).  @idx is unused.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		/* state/ring are only populated once the context ran. */
		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Aggregate and print statistics for all context objects: the kernel
 * context plus every context of every open client.
 *
 * Caller must hold filelist_mutex (protects dev->filelist); struct_mutex
 * is taken here for the object walks.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * debugfs "i915_gem_objects": global GEM memory accounting — totals over
 * the unbound and bound object lists (purgeable, mapped, huge-paged,
 * display-pinned breakdowns), GTT capacity, batch-pool and context
 * statistics, and a per-open-file summary attributed to the owning task.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];	/* scratch for stringify_page_sizes() */
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* Both list walks happen under the obj_lock spinlock. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Anything beyond the base 4K page size counts as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reuse size/count for the bound list; purgeable/mapped/huge
	 * continue accumulating across both lists. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* Per-file section: filelist_mutex guards the client list,
	 * struct_mutex is re-taken per file for the object walks. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the context's pid (real submitter) over the fd owner. */
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100558static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000559{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100560 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100563 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000564 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300565 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100566 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000567 int count, ret;
568
Chris Wilsonf2123812017-10-16 12:40:37 +0100569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
Chris Wilson08c18322011-01-10 00:00:24 +0000574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
Chris Wilsonf2123812017-10-16 12:40:37 +0100578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
Damien Lespiau267f0c92013-06-24 22:59:48 +0100591 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000592 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100593 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000594 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000601 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100602 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000603
604 return 0;
605}
606
/*
 * debugfs "i915_gem_batch_pool": for each engine and each batch-pool
 * size bucket, print the object count and describe every object, then
 * a grand total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	/* struct_mutex protects the pool lists and describe_obj(). */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk: just count for the header line. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second walk: describe each object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200652static void gen8_display_interrupt_info(struct seq_file *m)
653{
654 struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 int pipe;
656
657 for_each_pipe(dev_priv, pipe) {
658 enum intel_display_power_domain power_domain;
659
660 power_domain = POWER_DOMAIN_PIPE(pipe);
661 if (!intel_display_power_get_if_enabled(dev_priv,
662 power_domain)) {
663 seq_printf(m, "Pipe %c power disabled\n",
664 pipe_name(pipe));
665 continue;
666 }
667 seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 pipe_name(pipe),
669 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 pipe_name(pipe),
672 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 seq_printf(m, "Pipe %c IER:\t%08x\n",
674 pipe_name(pipe),
675 I915_READ(GEN8_DE_PIPE_IER(pipe)));
676
677 intel_display_power_put(dev_priv, power_domain);
678 }
679
680 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 I915_READ(GEN8_DE_PORT_IMR));
682 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 I915_READ(GEN8_DE_PORT_IIR));
684 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 I915_READ(GEN8_DE_PORT_IER));
686
687 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 I915_READ(GEN8_DE_MISC_IMR));
689 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 I915_READ(GEN8_DE_MISC_IIR));
691 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 I915_READ(GEN8_DE_MISC_IER));
693
694 seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 I915_READ(GEN8_PCU_IMR));
696 seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 I915_READ(GEN8_PCU_IIR));
698 seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 I915_READ(GEN8_PCU_IER));
700}
701
/*
 * debugfs dump of the interrupt-related registers.  One platform-specific
 * branch is selected (Cherryview, gen11+, gen8+, Valleyview, pre-PCH-split
 * or PCH-split), followed by the per-engine / GT interrupt masks.  A
 * runtime-PM wakeref is held for the whole dump so the device stays awake
 * while its registers are read.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Only read PIPESTAT while the pipe power well is up. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		/* Hotplug/flip registers need the display powered up. */
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display registers are shared with the gen8+ layout. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Only read PIPESTAT while the pipe power well is up. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-Ironlake: single set of interrupt registers. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* PCH-split: separate north/south display and GT registers. */
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
914
Chris Wilsona6172a82009-02-11 14:26:38 +0000915static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916{
David Weinehall36cdd012016-08-22 13:59:31 +0300917 struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100919 int i, ret;
920
921 ret = mutex_lock_interruptible(&dev->struct_mutex);
922 if (ret)
923 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000924
Chris Wilsona6172a82009-02-11 14:26:38 +0000925 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100927 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000928
Chris Wilson6c085a72012-08-20 11:40:46 +0200929 seq_printf(m, "Fence %d, pin count = %d, object = ",
930 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100931 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100932 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100933 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100934 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100935 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000936 }
937
Chris Wilson05394f32010-11-08 19:18:58 +0000938 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000939 return 0;
940}
941
Chris Wilson98a2f412016-10-12 10:05:18 +0100942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
945{
946 struct i915_gpu_state *error = file->private_data;
947 struct drm_i915_error_state_buf str;
948 ssize_t ret;
949 loff_t tmp;
950
951 if (!error)
952 return 0;
953
954 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 if (ret)
956 return ret;
957
958 ret = i915_error_state_to_str(&str, error);
959 if (ret)
960 goto out;
961
962 tmp = 0;
963 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 if (ret < 0)
965 goto out;
966
967 *pos = str.start + ret;
968out:
969 i915_error_state_buf_release(&str);
970 return ret;
971}
972
/* ->release(): drop the reference taken on the captured GPU state at open. */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
978
979static int i915_gpu_info_open(struct inode *inode, struct file *file)
980{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100981 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000982 struct i915_gpu_state *gpu;
983
Chris Wilson090e5fe2017-03-28 14:14:07 +0100984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000987 if (!gpu)
988 return -ENOMEM;
989
990 file->private_data = gpu;
991 return 0;
992}
993
/* debugfs i915_gpu_info: read-only snapshot of the current GPU state. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001001
Daniel Vetterd5442302012-04-27 15:17:40 +02001002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
1007{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001008 struct i915_gpu_state *error = filp->private_data;
1009
1010 if (!error)
1011 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001012
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001014 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001015
1016 return cnt;
1017}
1018
/* ->open(): grab a reference to the first recorded error state (if any). */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}
1024
/* debugfs i915_error_state: read the saved error state, write to clear it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001033#endif
1034
Kees Cook647416f2013-03-10 14:10:06 -07001035static int
Kees Cook647416f2013-03-10 14:10:06 -07001036i915_next_seqno_set(void *data, u64 val)
Mika Kuoppala40633212012-12-04 15:12:00 +02001037{
David Weinehall36cdd012016-08-22 13:59:31 +03001038 struct drm_i915_private *dev_priv = data;
1039 struct drm_device *dev = &dev_priv->drm;
Mika Kuoppala40633212012-12-04 15:12:00 +02001040 int ret;
1041
Mika Kuoppala40633212012-12-04 15:12:00 +02001042 ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 if (ret)
1044 return ret;
1045
Chris Wilson65c475c2018-01-02 15:12:31 +00001046 intel_runtime_pm_get(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01001047 ret = i915_gem_set_global_seqno(dev, val);
Chris Wilson65c475c2018-01-02 15:12:31 +00001048 intel_runtime_pm_put(dev_priv);
1049
Mika Kuoppala40633212012-12-04 15:12:00 +02001050 mutex_unlock(&dev->struct_mutex);
1051
Kees Cook647416f2013-03-10 14:10:06 -07001052 return ret;
Mika Kuoppala40633212012-12-04 15:12:00 +02001053}
1054
/*
 * i915_next_seqno is write-only (no getter); values are parsed and
 * formatted as "0x%llx\n".
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +02001058
/*
 * debugfs dump of GPU frequency / RPS state.  Three hardware families are
 * handled: Ironlake (gen5) via MEMSWCTL/MEMSTAT, Valleyview/Cherryview via
 * the Punit, and gen6+ via the RPS registers.  CD clock information is
 * printed for all platforms at the end.  A runtime-PM wakeref is held for
 * the whole dump.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* Punit communication is serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts keep the RP capabilities elsewhere. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field moved between generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		/* PM interrupt registers live in a different bank on gen8+. */
		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/* RPN/RP1/RP0 fields swap places on gen9-LP parts. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1265
Ben Widawskyd6369512016-09-20 16:54:32 +03001266static void i915_instdone_info(struct drm_i915_private *dev_priv,
1267 struct seq_file *m,
1268 struct intel_instdone *instdone)
1269{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001270 int slice;
1271 int subslice;
1272
Ben Widawskyd6369512016-09-20 16:54:32 +03001273 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1274 instdone->instdone);
1275
1276 if (INTEL_GEN(dev_priv) <= 3)
1277 return;
1278
1279 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1280 instdone->slice_common);
1281
1282 if (INTEL_GEN(dev_priv) <= 6)
1283 return;
1284
Ben Widawskyf9e61372016-09-20 16:54:33 +03001285 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1286 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1287 slice, subslice, instdone->sampler[slice][subslice]);
1288
1289 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1290 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1291 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001292}
1293
/*
 * debugfs: dump the current hangcheck state — global reset/wedge flags,
 * hangcheck timer status, and per-engine seqno/ACTHD/waiter details.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Report any in-flight reset machinery before the per-engine dump. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Hold a runtime-pm wakeref while sampling hardware state. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* INSTDONE is only sampled for the render engine (RCS). */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter tree under its irq-safe lock. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			/* Fresh sample vs the value hangcheck accumulated. */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1387
Michel Thierry061d06a2017-06-20 10:57:49 +01001388static int i915_reset_info(struct seq_file *m, void *unused)
1389{
1390 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1391 struct i915_gpu_error *error = &dev_priv->gpu_error;
1392 struct intel_engine_cs *engine;
1393 enum intel_engine_id id;
1394
1395 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1396
1397 for_each_engine(engine, dev_priv, id) {
1398 seq_printf(m, "%s = %u\n", engine->name,
1399 i915_reset_engine_count(error, engine));
1400 }
1401
1402 return 0;
1403}
1404
Ben Widawsky4d855292011-12-12 19:34:16 -08001405static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001406{
David Weinehall36cdd012016-08-22 13:59:31 +03001407 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001408 u32 rgvmodectl, rstdbyctl;
1409 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001410
Ben Widawsky616fdb52011-10-05 11:44:54 -07001411 rgvmodectl = I915_READ(MEMMODECTL);
1412 rstdbyctl = I915_READ(RSTDBYCTL);
1413 crstandvid = I915_READ16(CRSTANDVID);
1414
Jani Nikula742f4912015-09-03 11:16:09 +03001415 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001416 seq_printf(m, "Boost freq: %d\n",
1417 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1418 MEMMODE_BOOST_FREQ_SHIFT);
1419 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001420 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001421 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001422 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001423 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001424 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001425 seq_printf(m, "Starting frequency: P%d\n",
1426 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001427 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001428 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001429 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1430 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1431 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1432 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001433 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001434 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001435 switch (rstdbyctl & RSX_STATUS_MASK) {
1436 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001437 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001438 break;
1439 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001440 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001441 break;
1442 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001443 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001444 break;
1445 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001446 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001447 break;
1448 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001449 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001450 break;
1451 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001452 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001453 break;
1454 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001455 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001456 break;
1457 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001458
1459 return 0;
1460}
1461
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001462static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001463{
Chris Wilson233ebf52017-03-23 10:19:44 +00001464 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001465 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001466 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001467
Chris Wilsond7a133d2017-09-07 14:44:41 +01001468 seq_printf(m, "user.bypass_count = %u\n",
1469 i915->uncore.user_forcewake.count);
1470
Chris Wilson233ebf52017-03-23 10:19:44 +00001471 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001472 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001473 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001474 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001475
1476 return 0;
1477}
1478
Mika Kuoppala13628772017-03-15 17:43:02 +02001479static void print_rc6_res(struct seq_file *m,
1480 const char *title,
1481 const i915_reg_t reg)
1482{
1483 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1484
1485 seq_printf(m, "%s %u (%llu us)\n",
1486 title, I915_READ(reg),
1487 intel_rc6_residency_us(dev_priv, reg));
1488}
1489
Deepak S669ab5a2014-01-10 15:18:26 +05301490static int vlv_drpc_info(struct seq_file *m)
1491{
David Weinehall36cdd012016-08-22 13:59:31 +03001492 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001493 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301494
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001495 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301496 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1497
Deepak S669ab5a2014-01-10 15:18:26 +05301498 seq_printf(m, "RC6 Enabled: %s\n",
1499 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1500 GEN6_RC_CTL_EI_MODE(1))));
1501 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001502 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301503 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001504 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301505
Mika Kuoppala13628772017-03-15 17:43:02 +02001506 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1507 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001508
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001509 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301510}
1511
/*
 * Gen6+ DRPC dump: decode GEN6_GT_CORE_STATUS / GEN6_RC_CONTROL, the gen9
 * power-gating registers, RC6 residency counters and (gen6/7) the RC6
 * voltage IDs read via the pcode mailbox.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read: caller context already manages forcewake for this reg. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed through pcode on gen6/gen7. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RCn field of GT_CORE_STATUS into a human-readable state. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* One 8-bit VID per RC6 level, packed into rc6vids. */
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1599
1600static int i915_drpc_info(struct seq_file *m, void *unused)
1601{
David Weinehall36cdd012016-08-22 13:59:31 +03001602 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001603 int err;
1604
1605 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001606
David Weinehall36cdd012016-08-22 13:59:31 +03001607 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001608 err = vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001609 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001610 err = gen6_drpc_info(m);
Ben Widawsky4d855292011-12-12 19:34:16 -08001611 else
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001612 err = ironlake_drpc_info(m);
1613
1614 intel_runtime_pm_put(dev_priv);
1615
1616 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001617}
1618
Daniel Vetter9a851782015-06-18 10:30:22 +02001619static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1620{
David Weinehall36cdd012016-08-22 13:59:31 +03001621 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001622
1623 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1624 dev_priv->fb_tracking.busy_bits);
1625
1626 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1627 dev_priv->fb_tracking.flip_bits);
1628
1629 return 0;
1630}
1631
/*
 * debugfs: report whether FBC is active (and why not, if disabled), any
 * scheduled FBC worker, and the hardware compression status bits.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Wake the device and serialise against FBC enable/disable. */
	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (fbc->work.scheduled)
		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
			   fbc->work.scheduled_vblank,
			   drm_crtc_vblank_count(&fbc->crtc->base));

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compression-status register and mask vary by gen. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1676
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001677static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001678{
David Weinehall36cdd012016-08-22 13:59:31 +03001679 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001680
David Weinehall36cdd012016-08-22 13:59:31 +03001681 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001682 return -ENODEV;
1683
Rodrigo Vivida46f932014-08-01 02:04:45 -07001684 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001685
1686 return 0;
1687}
1688
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001689static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001690{
David Weinehall36cdd012016-08-22 13:59:31 +03001691 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001692 u32 reg;
1693
David Weinehall36cdd012016-08-22 13:59:31 +03001694 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001695 return -ENODEV;
1696
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001697 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001698
1699 reg = I915_READ(ILK_DPFC_CONTROL);
1700 dev_priv->fbc.false_color = val;
1701
1702 I915_WRITE(ILK_DPFC_CONTROL, val ?
1703 (reg | FBC_CTL_FALSE_COLOR) :
1704 (reg & ~FBC_CTL_FALSE_COLOR));
1705
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001706 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001707 return 0;
1708}
1709
/* debugfs fops wiring the false-colour get/set callbacks, "%llu\n" format. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1713
Paulo Zanoni92d44622013-05-31 16:33:24 -03001714static int i915_ips_status(struct seq_file *m, void *unused)
1715{
David Weinehall36cdd012016-08-22 13:59:31 +03001716 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni92d44622013-05-31 16:33:24 -03001717
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001718 if (!HAS_IPS(dev_priv))
1719 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001720
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001721 intel_runtime_pm_get(dev_priv);
1722
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001723 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001724 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001725
David Weinehall36cdd012016-08-22 13:59:31 +03001726 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001727 seq_puts(m, "Currently: unknown\n");
1728 } else {
1729 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1730 seq_puts(m, "Currently: enabled\n");
1731 else
1732 seq_puts(m, "Currently: disabled\n");
1733 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001734
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001735 intel_runtime_pm_put(dev_priv);
1736
Paulo Zanoni92d44622013-05-31 16:33:24 -03001737 return 0;
1738}
1739
/*
 * debugfs: report whether panel self-refresh is enabled, reading the
 * platform-appropriate status register. The else-if chain is ordered from
 * newest to oldest platform and must stay that way.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1769
Jesse Barnes7648fa92010-05-20 14:28:11 -07001770static int i915_emon_status(struct seq_file *m, void *unused)
1771{
David Weinehall36cdd012016-08-22 13:59:31 +03001772 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1773 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001774 unsigned long temp, chipset, gfx;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001775 int ret;
1776
David Weinehall36cdd012016-08-22 13:59:31 +03001777 if (!IS_GEN5(dev_priv))
Chris Wilson582be6b2012-04-30 19:35:02 +01001778 return -ENODEV;
1779
Chris Wilsonde227ef2010-07-03 07:58:38 +01001780 ret = mutex_lock_interruptible(&dev->struct_mutex);
1781 if (ret)
1782 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001783
1784 temp = i915_mch_val(dev_priv);
1785 chipset = i915_chipset_val(dev_priv);
1786 gfx = i915_gfx_val(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +01001787 mutex_unlock(&dev->struct_mutex);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001788
1789 seq_printf(m, "GMCH temp: %ld\n", temp);
1790 seq_printf(m, "Chipset power: %ld\n", chipset);
1791 seq_printf(m, "GFX power: %ld\n", gfx);
1792 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1793
1794 return 0;
1795}
1796
/*
 * debugfs: print the GPU-frequency to effective CPU/ring frequency table,
 * querying pcode (GEN6_PCODE_READ_MIN_FREQ_TABLE) for each GPU bin.
 * Requires LLC, since the table describes the shared cache/ring coupling.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* pcode mailbox access is serialised by pcu_lock. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/* pcode returns CPU and ring multipliers packed in ia_freq. */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1844
Chris Wilson44834a62010-08-19 16:09:23 +01001845static int i915_opregion(struct seq_file *m, void *unused)
1846{
David Weinehall36cdd012016-08-22 13:59:31 +03001847 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1848 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001849 struct intel_opregion *opregion = &dev_priv->opregion;
1850 int ret;
1851
1852 ret = mutex_lock_interruptible(&dev->struct_mutex);
1853 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001854 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001855
Jani Nikula2455a8e2015-12-14 12:50:53 +02001856 if (opregion->header)
1857 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001858
1859 mutex_unlock(&dev->struct_mutex);
1860
Daniel Vetter0d38f002012-04-21 22:49:10 +02001861out:
Chris Wilson44834a62010-08-19 16:09:23 +01001862 return 0;
1863}
1864
Jani Nikulaada8f952015-12-15 13:17:12 +02001865static int i915_vbt(struct seq_file *m, void *unused)
1866{
David Weinehall36cdd012016-08-22 13:59:31 +03001867 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001868
1869 if (opregion->vbt)
1870 seq_write(m, opregion->vbt, opregion->vbt_size);
1871
1872 return 0;
1873}
1874
Chris Wilson37811fc2010-08-25 22:45:57 +01001875static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1876{
David Weinehall36cdd012016-08-22 13:59:31 +03001877 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1878 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301879 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001880 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001881 int ret;
1882
1883 ret = mutex_lock_interruptible(&dev->struct_mutex);
1884 if (ret)
1885 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001886
Daniel Vetter06957262015-08-10 13:34:08 +02001887#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001888 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001889 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001890
Chris Wilson25bcce92016-07-02 15:36:00 +01001891 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1892 fbdev_fb->base.width,
1893 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001894 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001895 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001896 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001897 drm_framebuffer_read_refcount(&fbdev_fb->base));
1898 describe_obj(m, fbdev_fb->obj);
1899 seq_putc(m, '\n');
1900 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001901#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001902
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001903 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001904 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301905 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1906 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001907 continue;
1908
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001909 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001910 fb->base.width,
1911 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001912 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001913 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001914 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001915 drm_framebuffer_read_refcount(&fb->base));
Chris Wilson05394f32010-11-08 19:18:58 +00001916 describe_obj(m, fb->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +01001917 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001918 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001919 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001920 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001921
1922 return 0;
1923}
1924
Chris Wilson7e37f882016-08-02 22:50:21 +01001925static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001926{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001927 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1928 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001929}
1930
/*
 * List every HW context: its id, owning process (when known), remap
 * flag, and the per-engine state object plus ringbuffer.
 *
 * Returns 0, or the error from mutex_lock_interruptible().
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	/* struct_mutex protects the contexts.list walk and describe_obj(). */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task reference; may be NULL if the task
			 * already exited. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* No pid and file_priv is an ERR_PTR. */
			seq_puts(m, "(deleted) ");
		} else {
			/* No pid, valid file_priv: driver-internal context. */
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when ctx->remap_slice is non-zero, 'r' otherwise. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1983
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001984static const char *swizzle_string(unsigned swizzle)
1985{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001986 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001987 case I915_BIT_6_SWIZZLE_NONE:
1988 return "none";
1989 case I915_BIT_6_SWIZZLE_9:
1990 return "bit9";
1991 case I915_BIT_6_SWIZZLE_9_10:
1992 return "bit9/bit10";
1993 case I915_BIT_6_SWIZZLE_9_11:
1994 return "bit9/bit11";
1995 case I915_BIT_6_SWIZZLE_9_10_11:
1996 return "bit9/bit10/bit11";
1997 case I915_BIT_6_SWIZZLE_9_17:
1998 return "bit9/bit17";
1999 case I915_BIT_6_SWIZZLE_9_10_17:
2000 return "bit9/bit10/bit17";
2001 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09002002 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002003 }
2004
2005 return "bug";
2006}
2007
/*
 * Report the detected bit6 swizzle modes for X and Y tiling, plus the
 * raw hardware registers the detection is derived from: DRAM config on
 * gen3/4, DIMM/arbitration config on gen6+.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	/* The register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* The arbiter mode register moved on gen8. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2054
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002055static int per_file_ctx(int id, void *ptr, void *data)
2056{
Chris Wilsone2efd132016-05-24 14:53:34 +01002057 struct i915_gem_context *ctx = ptr;
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002058 struct seq_file *m = data;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002059 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2060
2061 if (!ppgtt) {
2062 seq_printf(m, " no ppgtt for context %d\n",
2063 ctx->user_handle);
2064 return 0;
2065 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002066
Oscar Mateof83d6512014-05-22 14:13:38 +01002067 if (i915_gem_context_is_default(ctx))
2068 seq_puts(m, " default context:\n");
2069 else
Oscar Mateo821d66d2014-07-03 16:28:00 +01002070 seq_printf(m, " context %d:\n", ctx->user_handle);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002071 ppgtt->debug_dump(ppgtt, m);
2072
2073 return 0;
2074}
2075
David Weinehall36cdd012016-08-22 13:59:31 +03002076static void gen8_ppgtt_info(struct seq_file *m,
2077 struct drm_i915_private *dev_priv)
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002078{
Ben Widawsky77df6772013-11-02 21:07:30 -07002079 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
Akash Goel3b3f1652016-10-13 22:44:48 +05302080 struct intel_engine_cs *engine;
2081 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002082 int i;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002083
Ben Widawsky77df6772013-11-02 21:07:30 -07002084 if (!ppgtt)
2085 return;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002086
Akash Goel3b3f1652016-10-13 22:44:48 +05302087 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002088 seq_printf(m, "%s\n", engine->name);
Ben Widawsky77df6772013-11-02 21:07:30 -07002089 for (i = 0; i < 4; i++) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002090 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
Ben Widawsky77df6772013-11-02 21:07:30 -07002091 pdp <<= 32;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002092 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
Ville Syrjäläa2a5b152014-03-31 18:17:16 +03002093 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
Ben Widawsky77df6772013-11-02 21:07:30 -07002094 }
2095 }
2096}
2097
/*
 * Dump gen6/7 ppgtt state: the GFX_MODE register, each engine's
 * page-directory registers, the aliasing ppgtt (if any) and ECOCHK.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* On gen6 GFX_MODE is a single global register... */
	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		/* ...while gen7 has a per-engine instance. */
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2130
/*
 * Dump per-process GTT state: the gen-specific ppgtt registers followed
 * by each open DRM file's contexts and their page tables.
 *
 * Returns 0, the error from mutex_lock_interruptible(), or -ESRCH if a
 * file's owning task has disappeared mid-walk.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	/* Lock order: filelist_mutex is taken outside struct_mutex. */
	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	/* Register reads in the gen*_ppgtt_info helpers need the device awake. */
	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* per_file_ctx() prints every context this file created. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2172
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002173static int count_irq_waiters(struct drm_i915_private *i915)
2174{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002175 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302176 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002177 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002178
Akash Goel3b3f1652016-10-13 22:44:48 +05302179 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002180 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002181
2182 return count;
2183}
2184
Chris Wilson7466c292016-08-15 09:49:33 +01002185static const char *rps_power_to_str(unsigned int power)
2186{
2187 static const char * const strings[] = {
2188 [LOW_POWER] = "low power",
2189 [BETWEEN] = "mixed",
2190 [HIGH_POWER] = "high power",
2191 };
2192
2193 if (power >= ARRAY_SIZE(strings) || !strings[power])
2194 return "unknown";
2195
2196 return strings[power];
2197}
2198
/*
 * Summarize RPS (dynamic GPU frequency) state: current and limit
 * frequencies, outstanding boosts per client, and - while busy on
 * gen6+ - the hardware up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost counts, one line per open DRM file. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* RCU-protected lookup; no task reference is taken, so the
		 * task may exit concurrently (hence the NULL checks). */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads bypass the forcewake bookkeeping, so hold
		 * forcewake explicitly around the burst of reads. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2268
Ben Widawsky63573eb2013-07-04 11:02:07 -07002269static int i915_llc(struct seq_file *m, void *data)
2270{
David Weinehall36cdd012016-08-22 13:59:31 +03002271 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002272 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002273
David Weinehall36cdd012016-08-22 13:59:31 +03002274 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002275 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2276 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002277
2278 return 0;
2279}
2280
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002281static int i915_huc_load_status_info(struct seq_file *m, void *data)
2282{
2283 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002284 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002285
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002286 if (!HAS_HUC(dev_priv))
2287 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002288
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002289 p = drm_seq_file_printer(m);
2290 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002291
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302292 intel_runtime_pm_get(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002293 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302294 intel_runtime_pm_put(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002295
2296 return 0;
2297}
2298
/*
 * Dump GuC firmware load state: the cached uc_fw descriptor, then the
 * decoded fields of the live GUC_STATUS register and the 16 software
 * scratch registers. Returns -ENODEV on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Register reads below require the device to be awake. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2330
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002331static const char *
2332stringify_guc_log_type(enum guc_log_buffer_type type)
2333{
2334 switch (type) {
2335 case GUC_ISR_LOG_BUFFER:
2336 return "ISR";
2337 case GUC_DPC_LOG_BUFFER:
2338 return "DPC";
2339 case GUC_CRASH_DUMP_LOG_BUFFER:
2340 return "CRASH";
2341 default:
2342 MISSING_CASE(type);
2343 }
2344
2345 return "";
2346}
2347
/*
 * Print GuC log relay statistics: the relay-full count plus flush and
 * sampled-overflow counters for each log buffer type. Prints a single
 * notice line when the relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	/* One line per buffer type: ISR, DPC, CRASH. */
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2371
Dave Gordon8b417c22015-08-12 15:43:44 +01002372static void i915_guc_client_info(struct seq_file *m,
2373 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302374 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002375{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002376 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002377 enum intel_engine_id id;
Dave Gordon8b417c22015-08-12 15:43:44 +01002378 uint64_t tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002379
Oscar Mateob09935a2017-03-22 10:39:53 -07002380 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2381 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002382 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2383 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002384
Akash Goel3b3f1652016-10-13 22:44:48 +05302385 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002386 u64 submissions = client->submissions[id];
2387 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002388 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002389 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002390 }
2391 seq_printf(m, "\tTotal: %llu\n", tot);
2392}
2393
/*
 * Top-level GuC debugfs entry: always dumps log stats; adds the
 * doorbell map and execbuf/preempt client details when GuC submission
 * is in use. Returns -ENODEV if the driver is not using the GuC.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Without GuC submission there are no clients to describe. */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* Submission enabled implies an execbuf client exists. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2425
/*
 * Walk the GuC stage descriptor pool and print every active descriptor:
 * its doorbell/workqueue configuration plus the execlist context (LRC)
 * registered per engine. Returns -ENODEV without GuC submission.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip pool slots not currently handed out to a client. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* NOTE(review): the engine mask comes from the execbuf
		 * client even for descriptors belonging to other clients -
		 * verify this is intentional. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2479
/*
 * i915_guc_log_dump - dump a GuC log buffer as hex words to debugfs.
 *
 * Backed by two debugfs nodes: when ->info_ent->data is set it dumps the
 * saved GuC load-error log, otherwise the live GuC log buffer.  Returns
 * 0 silently when there is nothing to dump, -ENODEV without GuC hardware,
 * or the pin error if the object cannot be mapped.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	/* Select the load-error log or the regular log buffer object. */
	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log captured/allocated: nothing to show, not an error. */
	if (!obj)
		return 0;

	/* Pin a write-combined CPU mapping for the duration of the dump. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * Four u32 words per output line.  NOTE(review): reads i+1..i+3
	 * unconditionally, which assumes obj->base.size is a multiple of
	 * 16 bytes (object sizes are page aligned in practice) - confirm.
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2518
Michał Winiarski4977a282018-03-19 10:53:40 +01002519static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302520{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002521 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302522
Michał Winiarski86aa8242018-03-08 16:46:53 +01002523 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002524 return -ENODEV;
2525
Michał Winiarski4977a282018-03-19 10:53:40 +01002526 *val = intel_guc_log_level_get(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302527
2528 return 0;
2529}
2530
Michał Winiarski4977a282018-03-19 10:53:40 +01002531static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302532{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002533 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302534
Michał Winiarski86aa8242018-03-08 16:46:53 +01002535 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002536 return -ENODEV;
2537
Michał Winiarski4977a282018-03-19 10:53:40 +01002538 return intel_guc_log_level_set(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302539}
2540
/*
 * Simple-attribute file for the GuC log level: a single u64 formatted
 * as signed decimal, routed to the get/set helpers above.
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2544
Michał Winiarski4977a282018-03-19 10:53:40 +01002545static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2546{
2547 struct drm_i915_private *dev_priv = inode->i_private;
2548
2549 if (!USES_GUC(dev_priv))
2550 return -ENODEV;
2551
2552 file->private_data = &dev_priv->guc.log;
2553
2554 return intel_guc_log_relay_open(&dev_priv->guc.log);
2555}
2556
2557static ssize_t
2558i915_guc_log_relay_write(struct file *filp,
2559 const char __user *ubuf,
2560 size_t cnt,
2561 loff_t *ppos)
2562{
2563 struct intel_guc_log *log = filp->private_data;
2564
2565 intel_guc_log_relay_flush(log);
2566
2567 return cnt;
2568}
2569
2570static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2571{
2572 struct drm_i915_private *dev_priv = inode->i_private;
2573
2574 intel_guc_log_relay_close(&dev_priv->guc.log);
2575
2576 return 0;
2577}
2578
/*
 * debugfs file wired to the GuC log relay: open starts the relay,
 * any write forces a flush, release tears it down.
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2585
Chris Wilsonb86bef202017-01-16 13:06:21 +00002586static const char *psr2_live_status(u32 val)
2587{
2588 static const char * const live_status[] = {
2589 "IDLE",
2590 "CAPTURE",
2591 "CAPTURE_FS",
2592 "SLEEP",
2593 "BUFON_FW",
2594 "ML_UP",
2595 "SU_STANDBY",
2596 "FAST_SLEEP",
2597 "DEEP_SLEEP",
2598 "BUF_ON",
2599 "TG_ON"
2600 };
2601
2602 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2603 if (val < ARRAY_SIZE(live_status))
2604 return live_status[val];
2605
2606 return "unknown";
2607}
2608
José Roberto de Souzad0bc8622018-04-25 14:23:33 -07002609static const char *psr_sink_status(u8 val)
2610{
2611 static const char * const sink_status[] = {
2612 "inactive",
2613 "transition to active, capture and display",
2614 "active, display from RFB",
2615 "active, capture and display on sink device timings",
2616 "transition to inactive, capture and display, timing re-sync",
2617 "reserved",
2618 "reserved",
2619 "sink internal error"
2620 };
2621
2622 val &= DP_PSR_SINK_STATE_MASK;
2623 if (val < ARRAY_SIZE(sink_status))
2624 return sink_status[val];
2625
2626 return "unknown";
2627}
2628
/*
 * i915_edp_psr_status - dump PSR (Panel Self Refresh) software and
 * hardware state.
 *
 * Prints sink support, the driver's enable state, busy-frontbuffer
 * bits, whether the hardware enable bit is set (EDP_PSR{,2}_CTL on DDI
 * platforms, per-pipe VLV_PSRSTAT otherwise), the HSW/BDW performance
 * counter, PSR2 live status, the sink's DPCD status, and - when PSR
 * debugging is on - the last entry/exit timestamps.  Holds psr.lock
 * and a runtime-PM reference across the register reads.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	/*
	 * Per-pipe PSR state, filled only on the !HAS_DDI (VLV/CHV) path.
	 * NOTE(review): sized for 3 pipes - assumes pre-DDI platforms
	 * never have more; confirm before reuse.
	 */
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	/* Without a PSR-capable sink there is nothing more to report. */
	if (!sink_support)
		return 0;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		/* DDI platforms: one global enable bit per PSR flavour. */
		if (dev_priv->psr.psr2_enabled)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		/* VLV/CHV: PSR state lives in a per-pipe status register. */
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			/* Skip powered-down transcoders rather than waking them. */
			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	/* On VLV/CHV, also name the pipes on which PSR is active. */
	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_enabled) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS);

		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}

	/* Ask the sink itself what state it believes it is in. */
	if (dev_priv->psr.enabled) {
		struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
		u8 val;

		if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
			seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
				   psr_sink_status(val));
	}
	mutex_unlock(&dev_priv->psr.lock);

	if (READ_ONCE(dev_priv->psr.debug)) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2731
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002732static int
2733i915_edp_psr_debug_set(void *data, u64 val)
2734{
2735 struct drm_i915_private *dev_priv = data;
2736
2737 if (!CAN_PSR(dev_priv))
2738 return -ENODEV;
2739
2740 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2741
2742 intel_runtime_pm_get(dev_priv);
2743 intel_psr_irq_control(dev_priv, !!val);
2744 intel_runtime_pm_put(dev_priv);
2745
2746 return 0;
2747}
2748
2749static int
2750i915_edp_psr_debug_get(void *data, u64 *val)
2751{
2752 struct drm_i915_private *dev_priv = data;
2753
2754 if (!CAN_PSR(dev_priv))
2755 return -ENODEV;
2756
2757 *val = READ_ONCE(dev_priv->psr.debug);
2758 return 0;
2759}
2760
/*
 * Simple-attribute file toggling PSR debug interrupts: a single u64
 * formatted as unsigned decimal, routed to the get/set helpers above.
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2764
/*
 * i915_sink_crc - read a 6-byte frame CRC from the eDP sink and print
 * it as 12 hex digits.
 *
 * Walks the connector list for the first active eDP connector, taking
 * the connection and crtc locks under a drm_modeset_acquire context;
 * -EDEADLK from any lock triggers the standard backoff-and-retry dance.
 * Waits for a pending commit's hw_done completion so the CRC reflects
 * the frame actually being scanned out.  Returns -ENODEV when no
 * suitable connector is found.
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		/* Sink CRC is only supported over eDP here. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		/*
		 * 'continue' keeps any locks recorded in @ctx; they are
		 * all released together at 'out'.
		 */
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		/* First active eDP connector wins; print and stop looking. */
		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* -EDEADLK means another thread holds a lock we need:
		 * back off (dropping our locks) and retry from scratch. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2844
/*
 * i915_energy_uJ - report accumulated GPU energy consumption in
 * microjoules.
 *
 * The RAPL power-unit MSR supplies only the energy-unit exponent
 * (bits 12:8); the energy counter itself is read from the
 * MCH_SECP_NRG_STTS register.  Returns -ENODEV before gen6 or when
 * the MSR cannot be read.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	/* 'power' first holds the raw MSR value... */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* ...of which only the energy-unit exponent (bits 12:8) is kept,
	 * then it is reused for the actual counter value. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2871
/*
 * i915_runtime_pm_status - dump runtime power-management state: GPU
 * idleness (with wakeup epoch), whether IRQs are disabled, the runtime
 * PM usage count (when CONFIG_PM is built in) and the PCI device's
 * current power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Note: only a warning line; the remaining info is still printed. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2896
Imre Deak1da51582013-11-25 17:15:35 +02002897static int i915_power_domain_info(struct seq_file *m, void *unused)
2898{
David Weinehall36cdd012016-08-22 13:59:31 +03002899 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002900 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2901 int i;
2902
2903 mutex_lock(&power_domains->lock);
2904
2905 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2906 for (i = 0; i < power_domains->power_well_count; i++) {
2907 struct i915_power_well *power_well;
2908 enum intel_display_power_domain power_domain;
2909
2910 power_well = &power_domains->power_wells[i];
2911 seq_printf(m, "%-25s %d\n", power_well->name,
2912 power_well->count);
2913
Joonas Lahtinen8385c2e2017-02-08 15:12:10 +02002914 for_each_power_domain(power_domain, power_well->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002915 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002916 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002917 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002918 }
2919
2920 mutex_unlock(&power_domains->lock);
2921
2922 return 0;
2923}
2924
/*
 * i915_dmc_info - dump DMC/CSR firmware state: load status, firmware
 * path and version, platform-specific DC-state transition counters,
 * and the program/ssp/htp registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload, skip straight to the raw registers. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/*
	 * DC-state counters only exist (at these offsets) on KBL, on SKL
	 * with firmware >= 1.6, and on BXT with firmware >= 1.4.
	 */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2966
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002967static void intel_seq_print_mode(struct seq_file *m, int tabs,
2968 struct drm_display_mode *mode)
2969{
2970 int i;
2971
2972 for (i = 0; i < tabs; i++)
2973 seq_putc(m, '\t');
2974
2975 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2976 mode->base.id, mode->name,
2977 mode->vrefresh, mode->clock,
2978 mode->hdisplay, mode->hsync_start,
2979 mode->hsync_end, mode->htotal,
2980 mode->vdisplay, mode->vsync_start,
2981 mode->vsync_end, mode->vtotal,
2982 mode->type, mode->flags);
2983}
2984
2985static void intel_encoder_info(struct seq_file *m,
2986 struct intel_crtc *intel_crtc,
2987 struct intel_encoder *intel_encoder)
2988{
David Weinehall36cdd012016-08-22 13:59:31 +03002989 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2990 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002991 struct drm_crtc *crtc = &intel_crtc->base;
2992 struct intel_connector *intel_connector;
2993 struct drm_encoder *encoder;
2994
2995 encoder = &intel_encoder->base;
2996 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002997 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002998 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2999 struct drm_connector *connector = &intel_connector->base;
3000 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
3001 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03003002 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003003 drm_get_connector_status_name(connector->status));
3004 if (connector->status == connector_status_connected) {
3005 struct drm_display_mode *mode = &crtc->mode;
3006 seq_printf(m, ", mode:\n");
3007 intel_seq_print_mode(m, 2, mode);
3008 } else {
3009 seq_putc(m, '\n');
3010 }
3011 }
3012}
3013
3014static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3015{
David Weinehall36cdd012016-08-22 13:59:31 +03003016 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3017 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003018 struct drm_crtc *crtc = &intel_crtc->base;
3019 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003020 struct drm_plane_state *plane_state = crtc->primary->state;
3021 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003022
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003023 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07003024 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003025 fb->base.id, plane_state->src_x >> 16,
3026 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07003027 else
3028 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003029 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3030 intel_encoder_info(m, intel_crtc, intel_encoder);
3031}
3032
3033static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3034{
3035 struct drm_display_mode *mode = panel->fixed_mode;
3036
3037 seq_printf(m, "\tfixed mode:\n");
3038 intel_seq_print_mode(m, 2, mode);
3039}
3040
3041static void intel_dp_info(struct seq_file *m,
3042 struct intel_connector *intel_connector)
3043{
3044 struct intel_encoder *intel_encoder = intel_connector->encoder;
3045 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3046
3047 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03003048 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003049 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003050 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03003051
3052 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3053 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003054}
3055
Libin Yang9a148a92016-11-28 20:07:05 +08003056static void intel_dp_mst_info(struct seq_file *m,
3057 struct intel_connector *intel_connector)
3058{
3059 struct intel_encoder *intel_encoder = intel_connector->encoder;
3060 struct intel_dp_mst_encoder *intel_mst =
3061 enc_to_mst(&intel_encoder->base);
3062 struct intel_digital_port *intel_dig_port = intel_mst->primary;
3063 struct intel_dp *intel_dp = &intel_dig_port->dp;
3064 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3065 intel_connector->port);
3066
3067 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3068}
3069
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003070static void intel_hdmi_info(struct seq_file *m,
3071 struct intel_connector *intel_connector)
3072{
3073 struct intel_encoder *intel_encoder = intel_connector->encoder;
3074 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3075
Jani Nikula742f4912015-09-03 11:16:09 +03003076 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003077}
3078
/* LVDS connectors: only the panel's fixed mode is worth dumping. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3084
/*
 * Dump one connector: id/type/status, the sink's reported display
 * info when connected, encoder-specific details (DP/MST, HDMI, LVDS)
 * and the full probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* A connector may not (yet) have an encoder attached to it. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may be driven by a dedicated HDMI or a generic DDI encoder. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3134
/* Map a drm_plane_type to a three-letter tag for the display-info dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3152
/*
 * Format a plane rotation/reflection bitmask as text, e.g.
 * "90 FLIPX (0x00000012)".
 *
 * NOTE(review): returns a pointer to a static buffer, so the result is
 * overwritten by the next call and the function is not reentrant.
 * Acceptable for a debugfs dump, but do not cache the pointer or call
 * this concurrently.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3172
/*
 * For every plane on @intel_crtc print its type, crtc position/size,
 * source rectangle, pixel format and rotation.  Source coordinates are
 * 16.16 fixed point and are printed as "int.frac" with the fractional
 * part in millionths (x * 15625 >> 10 == x * 1000000 / 65536).
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			/* No framebuffer bound: nothing to decode. */
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3215
/*
 * Dump the pipe scaler state (user bitmask, assigned scaler id, per-scaler
 * use/mode) of @intel_crtc into the debugfs seq_file @m. Called from
 * i915_display_info() with the crtc's modeset lock held.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3243
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003244static int i915_display_info(struct seq_file *m, void *unused)
3245{
David Weinehall36cdd012016-08-22 13:59:31 +03003246 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3247 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec2014-03-12 09:13:13 +00003248 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003249 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003250 struct drm_connector_list_iter conn_iter;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003251
Paulo Zanonib0e5ddf2014-04-01 14:55:10 -03003252 intel_runtime_pm_get(dev_priv);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003253 seq_printf(m, "CRTC info\n");
3254 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003255 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003256 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003257
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003258 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003259 pipe_config = to_intel_crtc_state(crtc->base.state);
3260
Robert Fekete3abc4e02015-10-27 16:58:32 +01003261 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec2014-03-12 09:13:13 +00003262 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003263 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003264 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3265 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3266
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003267 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003268 struct intel_plane *cursor =
3269 to_intel_plane(crtc->base.cursor);
3270
Chris Wilson065f2ec2014-03-12 09:13:13 +00003271 intel_crtc_info(m, crtc);
3272
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003273 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3274 yesno(cursor->base.state->visible),
3275 cursor->base.state->crtc_x,
3276 cursor->base.state->crtc_y,
3277 cursor->base.state->crtc_w,
3278 cursor->base.state->crtc_h,
3279 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003280 intel_scaler_info(m, crtc);
3281 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003282 }
Daniel Vettercace8412014-05-22 17:56:31 +02003283
3284 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3285 yesno(!crtc->cpu_fifo_underrun_disabled),
3286 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003287 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003288 }
3289
3290 seq_printf(m, "\n");
3291 seq_printf(m, "Connector info\n");
3292 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003293 mutex_lock(&dev->mode_config.mutex);
3294 drm_connector_list_iter_begin(dev, &conn_iter);
3295 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003296 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003297 drm_connector_list_iter_end(&conn_iter);
3298 mutex_unlock(&dev->mode_config.mutex);
3299
Paulo Zanonib0e5ddf2014-04-01 14:55:10 -03003300 intel_runtime_pm_put(dev_priv);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003301
3302 return 0;
3303}
3304
Chris Wilson1b365952016-10-04 21:11:31 +01003305static int i915_engine_info(struct seq_file *m, void *unused)
3306{
3307 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3308 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303309 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003310 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003311
Chris Wilson9c870d02016-10-24 13:42:15 +01003312 intel_runtime_pm_get(dev_priv);
3313
Chris Wilson6f561032018-01-24 11:36:07 +00003314 seq_printf(m, "GT awake? %s (epoch %u)\n",
3315 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003316 seq_printf(m, "Global active requests: %d\n",
3317 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003318 seq_printf(m, "CS timestamp frequency: %u kHz\n",
3319 dev_priv->info.cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003320
Chris Wilsonf636edb2017-10-09 12:02:57 +01003321 p = drm_seq_file_printer(m);
3322 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003323 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003324
Chris Wilson9c870d02016-10-24 13:42:15 +01003325 intel_runtime_pm_put(dev_priv);
3326
Chris Wilson1b365952016-10-04 21:11:31 +01003327 return 0;
3328}
3329
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003330static int i915_rcs_topology(struct seq_file *m, void *unused)
3331{
3332 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3333 struct drm_printer p = drm_seq_file_printer(m);
3334
3335 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3336
3337 return 0;
3338}
3339
Chris Wilsonc5418a82017-10-13 21:26:19 +01003340static int i915_shrinker_info(struct seq_file *m, void *unused)
3341{
3342 struct drm_i915_private *i915 = node_to_i915(m->private);
3343
3344 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3345 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3346
3347 return 0;
3348}
3349
Daniel Vetter728e29d2014-06-25 22:01:53 +03003350static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3351{
David Weinehall36cdd012016-08-22 13:59:31 +03003352 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3353 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03003354 int i;
3355
3356 drm_modeset_lock_all(dev);
3357 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3358 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3359
Lucas De Marchi72f775f2018-03-20 15:06:34 -07003360 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07003361 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01003362 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003363 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03003364 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003365 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02003366 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003367 pll->state.hw_state.dpll_md);
3368 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3369 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3370 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Daniel Vetter728e29d2014-06-25 22:01:53 +03003371 }
3372 drm_modeset_unlock_all(dev);
3373
3374 return 0;
3375}
3376
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003377static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003378{
David Weinehall36cdd012016-08-22 13:59:31 +03003379 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Arun Siluvery33136b02016-01-21 21:43:47 +00003380 struct i915_workarounds *workarounds = &dev_priv->workarounds;
Chris Wilsonf4ecfbf2018-04-14 13:27:54 +01003381 int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003382
3383 intel_runtime_pm_get(dev_priv);
3384
Arun Siluvery33136b02016-01-21 21:43:47 +00003385 seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
Arun Siluvery33136b02016-01-21 21:43:47 +00003386 for (i = 0; i < workarounds->count; ++i) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003387 i915_reg_t addr;
3388 u32 mask, value, read;
Mika Kuoppala2fa60f62014-10-07 17:21:27 +03003389 bool ok;
Arun Siluvery888b5992014-08-26 14:44:51 +01003390
Arun Siluvery33136b02016-01-21 21:43:47 +00003391 addr = workarounds->reg[i].addr;
3392 mask = workarounds->reg[i].mask;
3393 value = workarounds->reg[i].value;
Mika Kuoppala2fa60f62014-10-07 17:21:27 +03003394 read = I915_READ(addr);
3395 ok = (value & mask) == (read & mask);
3396 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003397 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
Arun Siluvery888b5992014-08-26 14:44:51 +01003398 }
3399
3400 intel_runtime_pm_put(dev_priv);
Arun Siluvery888b5992014-08-26 14:44:51 +01003401
3402 return 0;
3403}
3404
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303405static int i915_ipc_status_show(struct seq_file *m, void *data)
3406{
3407 struct drm_i915_private *dev_priv = m->private;
3408
3409 seq_printf(m, "Isochronous Priority Control: %s\n",
3410 yesno(dev_priv->ipc_enabled));
3411 return 0;
3412}
3413
3414static int i915_ipc_status_open(struct inode *inode, struct file *file)
3415{
3416 struct drm_i915_private *dev_priv = inode->i_private;
3417
3418 if (!HAS_IPC(dev_priv))
3419 return -ENODEV;
3420
3421 return single_open(file, i915_ipc_status_show, dev_priv);
3422}
3423
/*
 * debugfs write handler: parse a boolean from userspace and enable or
 * disable Isochronous Priority Control accordingly, forcing the
 * watermarks to be recomputed on the next commit.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	int ret;
	bool enable;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	intel_runtime_pm_get(dev_priv);
	if (!dev_priv->ipc_enabled && enable)
		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
	/* BIOS-programmed watermarks can no longer be trusted after a toggle. */
	dev_priv->wm.distrust_bios_wm = true;
	dev_priv->ipc_enabled = enable;
	intel_enable_ipc(dev_priv);
	intel_runtime_pm_put(dev_priv);

	return len;
}
3446
/*
 * debugfs file operations for i915_ipc_status: seq_file based read plus
 * a write handler that toggles IPC at runtime.
 */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3455
Damien Lespiauc5511e42014-11-04 17:06:51 +00003456static int i915_ddb_info(struct seq_file *m, void *unused)
3457{
David Weinehall36cdd012016-08-22 13:59:31 +03003458 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3459 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003460 struct skl_ddb_allocation *ddb;
3461 struct skl_ddb_entry *entry;
3462 enum pipe pipe;
3463 int plane;
3464
David Weinehall36cdd012016-08-22 13:59:31 +03003465 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003466 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003467
Damien Lespiauc5511e42014-11-04 17:06:51 +00003468 drm_modeset_lock_all(dev);
3469
3470 ddb = &dev_priv->wm.skl_hw.ddb;
3471
3472 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3473
3474 for_each_pipe(dev_priv, pipe) {
3475 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3476
Matt Roper8b364b42016-10-26 15:51:28 -07003477 for_each_universal_plane(dev_priv, pipe, plane) {
Damien Lespiauc5511e42014-11-04 17:06:51 +00003478 entry = &ddb->plane[pipe][plane];
3479 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3480 entry->start, entry->end,
3481 skl_ddb_entry_size(entry));
3482 }
3483
Matt Roper4969d332015-09-24 15:53:10 -07003484 entry = &ddb->plane[pipe][PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003485 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3486 entry->end, skl_ddb_entry_size(entry));
3487 }
3488
3489 drm_modeset_unlock_all(dev);
3490
3491 return 0;
3492}
3493
Vandana Kannana54746e2015-03-03 20:53:10 +05303494static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003495 struct drm_device *dev,
3496 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303497{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003498 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303499 struct i915_drrs *drrs = &dev_priv->drrs;
3500 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003501 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003502 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303503
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003504 drm_connector_list_iter_begin(dev, &conn_iter);
3505 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003506 if (connector->state->crtc != &intel_crtc->base)
3507 continue;
3508
3509 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303510 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003511 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303512
3513 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3514 seq_puts(m, "\tVBT: DRRS_type: Static");
3515 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3516 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3517 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3518 seq_puts(m, "\tVBT: DRRS_type: None");
3519 else
3520 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3521
3522 seq_puts(m, "\n\n");
3523
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003524 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303525 struct intel_panel *panel;
3526
3527 mutex_lock(&drrs->mutex);
3528 /* DRRS Supported */
3529 seq_puts(m, "\tDRRS Supported: Yes\n");
3530
3531 /* disable_drrs() will make drrs->dp NULL */
3532 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303533 seq_puts(m, "Idleness DRRS: Disabled\n");
3534 if (dev_priv->psr.enabled)
3535 seq_puts(m,
3536 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303537 mutex_unlock(&drrs->mutex);
3538 return;
3539 }
3540
3541 panel = &drrs->dp->attached_connector->panel;
3542 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3543 drrs->busy_frontbuffer_bits);
3544
3545 seq_puts(m, "\n\t\t");
3546 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3547 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3548 vrefresh = panel->fixed_mode->vrefresh;
3549 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3550 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3551 vrefresh = panel->downclock_mode->vrefresh;
3552 } else {
3553 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3554 drrs->refresh_rate_type);
3555 mutex_unlock(&drrs->mutex);
3556 return;
3557 }
3558 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3559
3560 seq_puts(m, "\n\t\t");
3561 mutex_unlock(&drrs->mutex);
3562 } else {
3563 /* DRRS not supported. Print the VBT parameter*/
3564 seq_puts(m, "\tDRRS Supported : No");
3565 }
3566 seq_puts(m, "\n");
3567}
3568
/*
 * debugfs: report per-crtc DRRS status for every active crtc, or a note
 * when no crtc is active. Takes all modeset locks while iterating.
 */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3592
Dave Airlie11bed952014-05-12 15:22:27 +10003593static int i915_dp_mst_info(struct seq_file *m, void *unused)
3594{
David Weinehall36cdd012016-08-22 13:59:31 +03003595 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3596 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003597 struct intel_encoder *intel_encoder;
3598 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003599 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003600 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003601
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003602 drm_connector_list_iter_begin(dev, &conn_iter);
3603 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003604 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003605 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003606
3607 intel_encoder = intel_attached_encoder(connector);
3608 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3609 continue;
3610
3611 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003612 if (!intel_dig_port->dp.can_mst)
3613 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003614
Jim Bride40ae80c2016-04-14 10:18:37 -07003615 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003616 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003617 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3618 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003619 drm_connector_list_iter_end(&conn_iter);
3620
Dave Airlie11bed952014-05-12 15:22:27 +10003621 return 0;
3622}
3623
Todd Previteeb3394fa2015-04-18 00:04:19 -07003624static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003625 const char __user *ubuf,
3626 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003627{
3628 char *input_buffer;
3629 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003630 struct drm_device *dev;
3631 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003632 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003633 struct intel_dp *intel_dp;
3634 int val = 0;
3635
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303636 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003637
Todd Previteeb3394fa2015-04-18 00:04:19 -07003638 if (len == 0)
3639 return 0;
3640
Geliang Tang261aeba2017-05-06 23:40:17 +08003641 input_buffer = memdup_user_nul(ubuf, len);
3642 if (IS_ERR(input_buffer))
3643 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003644
Todd Previteeb3394fa2015-04-18 00:04:19 -07003645 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3646
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003647 drm_connector_list_iter_begin(dev, &conn_iter);
3648 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003649 struct intel_encoder *encoder;
3650
Todd Previteeb3394fa2015-04-18 00:04:19 -07003651 if (connector->connector_type !=
3652 DRM_MODE_CONNECTOR_DisplayPort)
3653 continue;
3654
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003655 encoder = to_intel_encoder(connector->encoder);
3656 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3657 continue;
3658
3659 if (encoder && connector->status == connector_status_connected) {
3660 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003661 status = kstrtoint(input_buffer, 10, &val);
3662 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003663 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003664 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3665 /* To prevent erroneous activation of the compliance
3666 * testing code, only accept an actual value of 1 here
3667 */
3668 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003669 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003670 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003671 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003672 }
3673 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003674 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003675 kfree(input_buffer);
3676 if (status < 0)
3677 return status;
3678
3679 *offp += len;
3680 return len;
3681}
3682
/*
 * debugfs read: print "1" or "0" per DP connector depending on whether
 * compliance testing is armed on it ("0" for disconnected connectors).
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3716
3717static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003718 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003719{
David Weinehall36cdd012016-08-22 13:59:31 +03003720 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003721 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003722}
3723
/*
 * debugfs file operations for the DP compliance "test active" node:
 * seq_file based read plus a write handler to arm/disarm testing.
 */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3732
/*
 * debugfs read: dump the data captured for the last DP compliance test
 * per connected DP connector - the EDID-related value for EDID_READ
 * tests, or the mode/bpc triple for VIDEO_PATTERN tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003776
/*
 * debugfs read: print the pending DP compliance test type (hex) per
 * connected DP connector, "0" for disconnected ones.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003808
Damien Lespiau97e94b22014-11-04 17:06:50 +00003809static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003810{
David Weinehall36cdd012016-08-22 13:59:31 +03003811 struct drm_i915_private *dev_priv = m->private;
3812 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003813 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003814 int num_levels;
3815
David Weinehall36cdd012016-08-22 13:59:31 +03003816 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003817 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003818 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003819 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003820 else if (IS_G4X(dev_priv))
3821 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003822 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003823 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003824
3825 drm_modeset_lock_all(dev);
3826
3827 for (level = 0; level < num_levels; level++) {
3828 unsigned int latency = wm[level];
3829
Damien Lespiau97e94b22014-11-04 17:06:50 +00003830 /*
3831 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003832 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003833 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003834 if (INTEL_GEN(dev_priv) >= 9 ||
3835 IS_VALLEYVIEW(dev_priv) ||
3836 IS_CHERRYVIEW(dev_priv) ||
3837 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003838 latency *= 10;
3839 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003840 latency *= 5;
3841
3842 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003843 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003844 }
3845
3846 drm_modeset_unlock_all(dev);
3847}
3848
3849static int pri_wm_latency_show(struct seq_file *m, void *data)
3850{
David Weinehall36cdd012016-08-22 13:59:31 +03003851 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003852 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003853
David Weinehall36cdd012016-08-22 13:59:31 +03003854 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003855 latencies = dev_priv->wm.skl_latency;
3856 else
David Weinehall36cdd012016-08-22 13:59:31 +03003857 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003858
3859 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003860
3861 return 0;
3862}
3863
3864static int spr_wm_latency_show(struct seq_file *m, void *data)
3865{
David Weinehall36cdd012016-08-22 13:59:31 +03003866 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003867 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003868
David Weinehall36cdd012016-08-22 13:59:31 +03003869 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003870 latencies = dev_priv->wm.skl_latency;
3871 else
David Weinehall36cdd012016-08-22 13:59:31 +03003872 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003873
3874 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003875
3876 return 0;
3877}
3878
3879static int cur_wm_latency_show(struct seq_file *m, void *data)
3880{
David Weinehall36cdd012016-08-22 13:59:31 +03003881 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003882 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003883
David Weinehall36cdd012016-08-22 13:59:31 +03003884 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003885 latencies = dev_priv->wm.skl_latency;
3886 else
David Weinehall36cdd012016-08-22 13:59:31 +03003887 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003888
3889 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003890
3891 return 0;
3892}
3893
3894static int pri_wm_latency_open(struct inode *inode, struct file *file)
3895{
David Weinehall36cdd012016-08-22 13:59:31 +03003896 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003897
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003898 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003899 return -ENODEV;
3900
David Weinehall36cdd012016-08-22 13:59:31 +03003901 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003902}
3903
3904static int spr_wm_latency_open(struct inode *inode, struct file *file)
3905{
David Weinehall36cdd012016-08-22 13:59:31 +03003906 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003907
David Weinehall36cdd012016-08-22 13:59:31 +03003908 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003909 return -ENODEV;
3910
David Weinehall36cdd012016-08-22 13:59:31 +03003911 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003912}
3913
3914static int cur_wm_latency_open(struct inode *inode, struct file *file)
3915{
David Weinehall36cdd012016-08-22 13:59:31 +03003916 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003917
David Weinehall36cdd012016-08-22 13:59:31 +03003918 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003919 return -ENODEV;
3920
David Weinehall36cdd012016-08-22 13:59:31 +03003921 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003922}
3923
/*
 * Common debugfs write handler for the watermark latency files.
 *
 * Parses up to num_levels space-separated u16 latency values from the
 * user buffer and copies them into the platform latency table @wm
 * (always sized for 8 levels). Returns @len on success, -EINVAL for
 * over-long or malformed input (the value count must match exactly the
 * number of levels the platform implements), -EFAULT if the user
 * buffer cannot be read.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Number of watermark levels this platform actually implements. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reject writes that would not fit (with terminator) in tmp[]. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Require exactly one value per implemented watermark level. */
	if (ret != num_levels)
		return -EINVAL;

	/* Serialize against modesets while swapping in the new table. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3968
3969
3970static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3971 size_t len, loff_t *offp)
3972{
3973 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003974 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003975 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003976
David Weinehall36cdd012016-08-22 13:59:31 +03003977 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003978 latencies = dev_priv->wm.skl_latency;
3979 else
David Weinehall36cdd012016-08-22 13:59:31 +03003980 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003981
3982 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003983}
3984
3985static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3986 size_t len, loff_t *offp)
3987{
3988 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003989 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003990 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003991
David Weinehall36cdd012016-08-22 13:59:31 +03003992 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003993 latencies = dev_priv->wm.skl_latency;
3994 else
David Weinehall36cdd012016-08-22 13:59:31 +03003995 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003996
3997 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003998}
3999
4000static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4001 size_t len, loff_t *offp)
4002{
4003 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03004004 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004005 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02004006
David Weinehall36cdd012016-08-22 13:59:31 +03004007 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00004008 latencies = dev_priv->wm.skl_latency;
4009 else
David Weinehall36cdd012016-08-22 13:59:31 +03004010 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004011
4012 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02004013}
4014
/* debugfs file exposing the primary plane watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* debugfs file exposing the sprite plane watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* debugfs file exposing the cursor plane watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4041
Kees Cook647416f2013-03-10 14:10:06 -07004042static int
4043i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004044{
David Weinehall36cdd012016-08-22 13:59:31 +03004045 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004046
Chris Wilsond98c52c2016-04-13 17:35:05 +01004047 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004048
Kees Cook647416f2013-03-10 14:10:06 -07004049 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004050}
4051
Kees Cook647416f2013-03-10 14:10:06 -07004052static int
4053i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004054{
Chris Wilson598b6b52017-03-25 13:47:35 +00004055 struct drm_i915_private *i915 = data;
4056 struct intel_engine_cs *engine;
4057 unsigned int tmp;
Imre Deakd46c0512014-04-14 20:24:27 +03004058
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02004059 /*
4060 * There is no safeguard against this debugfs entry colliding
4061 * with the hangcheck calling same i915_handle_error() in
4062 * parallel, causing an explosion. For now we assume that the
4063 * test harness is responsible enough not to inject gpu hangs
4064 * while it is writing to 'i915_wedged'
4065 */
4066
Chris Wilson598b6b52017-03-25 13:47:35 +00004067 if (i915_reset_backoff(&i915->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02004068 return -EAGAIN;
4069
Chris Wilson598b6b52017-03-25 13:47:35 +00004070 for_each_engine_masked(engine, i915, val, tmp) {
4071 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4072 engine->hangcheck.stalled = true;
4073 }
Imre Deakd46c0512014-04-14 20:24:27 +03004074
Chris Wilsonce800752018-03-20 10:04:49 +00004075 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4076 "Manually set wedged engine mask = %llx", val);
Chris Wilson598b6b52017-03-25 13:47:35 +00004077
4078 wait_on_bit(&i915->gpu_error.flags,
Chris Wilsond3df42b2017-03-16 17:13:05 +00004079 I915_RESET_HANDOFF,
4080 TASK_UNINTERRUPTIBLE);
4081
Kees Cook647416f2013-03-10 14:10:06 -07004082 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004083}
4084
Kees Cook647416f2013-03-10 14:10:06 -07004085DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4086 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03004087 "%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004088
Kees Cook647416f2013-03-10 14:10:06 -07004089static int
Chris Wilson64486ae2017-03-07 15:59:08 +00004090fault_irq_set(struct drm_i915_private *i915,
4091 unsigned long *irq,
4092 unsigned long val)
4093{
4094 int err;
4095
4096 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4097 if (err)
4098 return err;
4099
4100 err = i915_gem_wait_for_idle(i915,
4101 I915_WAIT_LOCKED |
4102 I915_WAIT_INTERRUPTIBLE);
4103 if (err)
4104 goto err_unlock;
4105
Chris Wilson64486ae2017-03-07 15:59:08 +00004106 *irq = val;
4107 mutex_unlock(&i915->drm.struct_mutex);
4108
4109 /* Flush idle worker to disarm irq */
Chris Wilson7c262402017-10-06 11:40:38 +01004110 drain_delayed_work(&i915->gt.idle_work);
Chris Wilson64486ae2017-03-07 15:59:08 +00004111
4112 return 0;
4113
4114err_unlock:
4115 mutex_unlock(&i915->drm.struct_mutex);
4116 return err;
4117}
4118
4119static int
Chris Wilson094f9a52013-09-25 17:34:55 +01004120i915_ring_missed_irq_get(void *data, u64 *val)
4121{
David Weinehall36cdd012016-08-22 13:59:31 +03004122 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004123
4124 *val = dev_priv->gpu_error.missed_irq_rings;
4125 return 0;
4126}
4127
4128static int
4129i915_ring_missed_irq_set(void *data, u64 val)
4130{
Chris Wilson64486ae2017-03-07 15:59:08 +00004131 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004132
Chris Wilson64486ae2017-03-07 15:59:08 +00004133 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004134}
4135
4136DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4137 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4138 "0x%08llx\n");
4139
4140static int
4141i915_ring_test_irq_get(void *data, u64 *val)
4142{
David Weinehall36cdd012016-08-22 13:59:31 +03004143 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004144
4145 *val = dev_priv->gpu_error.test_irq_rings;
4146
4147 return 0;
4148}
4149
4150static int
4151i915_ring_test_irq_set(void *data, u64 val)
4152{
Chris Wilson64486ae2017-03-07 15:59:08 +00004153 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004154
Chris Wilson64486ae2017-03-07 15:59:08 +00004155 val &= INTEL_INFO(i915)->ring_mask;
Chris Wilson094f9a52013-09-25 17:34:55 +01004156 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004157
Chris Wilson64486ae2017-03-07 15:59:08 +00004158 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004159}
4160
4161DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4162 i915_ring_test_irq_get, i915_ring_test_irq_set,
4163 "0x%08llx\n");
4164
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004165#define DROP_UNBOUND BIT(0)
4166#define DROP_BOUND BIT(1)
4167#define DROP_RETIRE BIT(2)
4168#define DROP_ACTIVE BIT(3)
4169#define DROP_FREED BIT(4)
4170#define DROP_SHRINK_ALL BIT(5)
4171#define DROP_IDLE BIT(6)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004172#define DROP_ALL (DROP_UNBOUND | \
4173 DROP_BOUND | \
4174 DROP_RETIRE | \
4175 DROP_ACTIVE | \
Chris Wilson8eadc192017-03-08 14:46:22 +00004176 DROP_FREED | \
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004177 DROP_SHRINK_ALL |\
4178 DROP_IDLE)
Kees Cook647416f2013-03-10 14:10:06 -07004179static int
4180i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004181{
Kees Cook647416f2013-03-10 14:10:06 -07004182 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004183
Kees Cook647416f2013-03-10 14:10:06 -07004184 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004185}
4186
Kees Cook647416f2013-03-10 14:10:06 -07004187static int
4188i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004189{
David Weinehall36cdd012016-08-22 13:59:31 +03004190 struct drm_i915_private *dev_priv = data;
4191 struct drm_device *dev = &dev_priv->drm;
Chris Wilson00c26cf2017-05-24 17:26:53 +01004192 int ret = 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004193
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004194 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4195 val, val & DROP_ALL);
Chris Wilsondd624af2013-01-15 12:39:35 +00004196
4197 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4198 * on ioctls on -EAGAIN. */
Chris Wilson00c26cf2017-05-24 17:26:53 +01004199 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4200 ret = mutex_lock_interruptible(&dev->struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00004201 if (ret)
Chris Wilson00c26cf2017-05-24 17:26:53 +01004202 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004203
Chris Wilson00c26cf2017-05-24 17:26:53 +01004204 if (val & DROP_ACTIVE)
4205 ret = i915_gem_wait_for_idle(dev_priv,
4206 I915_WAIT_INTERRUPTIBLE |
4207 I915_WAIT_LOCKED);
4208
4209 if (val & DROP_RETIRE)
Chris Wilsone61e0f52018-02-21 09:56:36 +00004210 i915_retire_requests(dev_priv);
Chris Wilson00c26cf2017-05-24 17:26:53 +01004211
4212 mutex_unlock(&dev->struct_mutex);
4213 }
Chris Wilsondd624af2013-01-15 12:39:35 +00004214
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004215 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01004216 if (val & DROP_BOUND)
Chris Wilson912d5722017-09-06 16:19:30 -07004217 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01004218
Chris Wilson21ab4e72014-09-09 11:16:08 +01004219 if (val & DROP_UNBOUND)
Chris Wilson912d5722017-09-06 16:19:30 -07004220 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00004221
Chris Wilson8eadc192017-03-08 14:46:22 +00004222 if (val & DROP_SHRINK_ALL)
4223 i915_gem_shrink_all(dev_priv);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004224 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00004225
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004226 if (val & DROP_IDLE)
4227 drain_delayed_work(&dev_priv->gt.idle_work);
4228
Chris Wilsonc9c70472018-02-19 22:06:31 +00004229 if (val & DROP_FREED)
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004230 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004231
Kees Cook647416f2013-03-10 14:10:06 -07004232 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004233}
4234
Kees Cook647416f2013-03-10 14:10:06 -07004235DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4236 i915_drop_caches_get, i915_drop_caches_set,
4237 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004238
Kees Cook647416f2013-03-10 14:10:06 -07004239static int
Kees Cook647416f2013-03-10 14:10:06 -07004240i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004241{
David Weinehall36cdd012016-08-22 13:59:31 +03004242 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004243 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004244
David Weinehall36cdd012016-08-22 13:59:31 +03004245 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004246 return -ENODEV;
4247
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004248 intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02004249
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004250 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004251
4252 intel_runtime_pm_put(dev_priv);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004253
Kees Cook647416f2013-03-10 14:10:06 -07004254 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004255
Kees Cook647416f2013-03-10 14:10:06 -07004256 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004257}
4258
Kees Cook647416f2013-03-10 14:10:06 -07004259static int
4260i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004261{
David Weinehall36cdd012016-08-22 13:59:31 +03004262 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004263 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004264
David Weinehall36cdd012016-08-22 13:59:31 +03004265 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004266 return -ENODEV;
4267
Kees Cook647416f2013-03-10 14:10:06 -07004268 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004269 return -EINVAL;
4270
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004271 intel_runtime_pm_get(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004272 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004273
4274 /* Update the cache sharing policy here as well */
4275 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4276 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4277 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4278 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4279
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004280 intel_runtime_pm_put(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004281 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004282}
4283
Kees Cook647416f2013-03-10 14:10:06 -07004284DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4285 i915_cache_sharing_get, i915_cache_sharing_set,
4286 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004287
David Weinehall36cdd012016-08-22 13:59:31 +03004288static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004289 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004290{
Chris Wilson7aa0b142018-03-13 00:40:54 +00004291#define SS_MAX 2
4292 const int ss_max = SS_MAX;
4293 u32 sig1[SS_MAX], sig2[SS_MAX];
Jeff McGee5d395252015-04-03 18:13:17 -07004294 int ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004295
4296 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4297 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4298 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4299 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4300
4301 for (ss = 0; ss < ss_max; ss++) {
4302 unsigned int eu_cnt;
4303
4304 if (sig1[ss] & CHV_SS_PG_ENABLE)
4305 /* skip disabled subslice */
4306 continue;
4307
Imre Deakf08a0c92016-08-31 19:13:04 +03004308 sseu->slice_mask = BIT(0);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004309 sseu->subslice_mask[0] |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004310 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4311 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4312 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4313 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004314 sseu->eu_total += eu_cnt;
4315 sseu->eu_per_subslice = max_t(unsigned int,
4316 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004317 }
Chris Wilson7aa0b142018-03-13 00:40:54 +00004318#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004319}
4320
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004321static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4322 struct sseu_dev_info *sseu)
4323{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004324#define SS_MAX 6
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004325 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004326 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004327 int s, ss;
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004328
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004329 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004330 /*
4331 * FIXME: Valid SS Mask respects the spec and read
4332 * only valid bits for those registers, excluding reserverd
4333 * although this seems wrong because it would leave many
4334 * subslices without ACK.
4335 */
4336 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4337 GEN10_PGCTL_VALID_SS_MASK(s);
4338 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4339 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4340 }
4341
4342 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4343 GEN9_PGCTL_SSA_EU19_ACK |
4344 GEN9_PGCTL_SSA_EU210_ACK |
4345 GEN9_PGCTL_SSA_EU311_ACK;
4346 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4347 GEN9_PGCTL_SSB_EU19_ACK |
4348 GEN9_PGCTL_SSB_EU210_ACK |
4349 GEN9_PGCTL_SSB_EU311_ACK;
4350
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004351 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004352 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4353 /* skip disabled slice */
4354 continue;
4355
4356 sseu->slice_mask |= BIT(s);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004357 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004358
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004359 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004360 unsigned int eu_cnt;
4361
4362 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4363 /* skip disabled subslice */
4364 continue;
4365
4366 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4367 eu_mask[ss % 2]);
4368 sseu->eu_total += eu_cnt;
4369 sseu->eu_per_subslice = max_t(unsigned int,
4370 sseu->eu_per_subslice,
4371 eu_cnt);
4372 }
4373 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004374#undef SS_MAX
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004375}
4376
David Weinehall36cdd012016-08-22 13:59:31 +03004377static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004378 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004379{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004380#define SS_MAX 3
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004381 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004382 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Jeff McGee5d395252015-04-03 18:13:17 -07004383 int s, ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004384
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004385 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004386 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4387 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4388 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4389 }
4390
Jeff McGee5d395252015-04-03 18:13:17 -07004391 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4392 GEN9_PGCTL_SSA_EU19_ACK |
4393 GEN9_PGCTL_SSA_EU210_ACK |
4394 GEN9_PGCTL_SSA_EU311_ACK;
4395 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4396 GEN9_PGCTL_SSB_EU19_ACK |
4397 GEN9_PGCTL_SSB_EU210_ACK |
4398 GEN9_PGCTL_SSB_EU311_ACK;
4399
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004400 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004401 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4402 /* skip disabled slice */
4403 continue;
4404
Imre Deakf08a0c92016-08-31 19:13:04 +03004405 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004406
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004407 if (IS_GEN9_BC(dev_priv))
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004408 sseu->subslice_mask[s] =
4409 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
Jeff McGee1c046bc2015-04-03 18:13:18 -07004410
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004411 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004412 unsigned int eu_cnt;
4413
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004414 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004415 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4416 /* skip disabled subslice */
4417 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004418
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004419 sseu->subslice_mask[s] |= BIT(ss);
Imre Deak57ec1712016-08-31 19:13:05 +03004420 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004421
Jeff McGee5d395252015-04-03 18:13:17 -07004422 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4423 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004424 sseu->eu_total += eu_cnt;
4425 sseu->eu_per_subslice = max_t(unsigned int,
4426 sseu->eu_per_subslice,
4427 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004428 }
4429 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004430#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004431}
4432
David Weinehall36cdd012016-08-22 13:59:31 +03004433static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004434 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004435{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004436 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004437 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004438
Imre Deakf08a0c92016-08-31 19:13:04 +03004439 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004440
Imre Deakf08a0c92016-08-31 19:13:04 +03004441 if (sseu->slice_mask) {
Imre Deak43b67992016-08-31 19:13:02 +03004442 sseu->eu_per_subslice =
4443 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004444 for (s = 0; s < fls(sseu->slice_mask); s++) {
4445 sseu->subslice_mask[s] =
4446 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4447 }
Imre Deak57ec1712016-08-31 19:13:05 +03004448 sseu->eu_total = sseu->eu_per_subslice *
4449 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004450
4451 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004452 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004453 u8 subslice_7eu =
4454 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004455
Imre Deak915490d2016-08-31 19:13:01 +03004456 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004457 }
4458 }
4459}
4460
Imre Deak615d8902016-08-31 19:13:03 +03004461static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4462 const struct sseu_dev_info *sseu)
4463{
4464 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4465 const char *type = is_available_info ? "Available" : "Enabled";
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004466 int s;
Imre Deak615d8902016-08-31 19:13:03 +03004467
Imre Deakc67ba532016-08-31 19:13:06 +03004468 seq_printf(m, " %s Slice Mask: %04x\n", type,
4469 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004470 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004471 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004472 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004473 sseu_subslice_total(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004474 for (s = 0; s < fls(sseu->slice_mask); s++) {
4475 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4476 s, hweight8(sseu->subslice_mask[s]));
4477 }
Imre Deak615d8902016-08-31 19:13:03 +03004478 seq_printf(m, " %s EU Total: %u\n", type,
4479 sseu->eu_total);
4480 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4481 sseu->eu_per_subslice);
4482
4483 if (!is_available_info)
4484 return;
4485
4486 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4487 if (HAS_POOLED_EU(dev_priv))
4488 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4489
4490 seq_printf(m, " Has Slice Power Gating: %s\n",
4491 yesno(sseu->has_slice_pg));
4492 seq_printf(m, " Has Subslice Power Gating: %s\n",
4493 yesno(sseu->has_subslice_pg));
4494 seq_printf(m, " Has EU Power Gating: %s\n",
4495 yesno(sseu->has_eu_pg));
4496}
4497
Jeff McGee38732182015-02-13 10:27:54 -06004498static int i915_sseu_status(struct seq_file *m, void *unused)
4499{
David Weinehall36cdd012016-08-22 13:59:31 +03004500 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004501 struct sseu_dev_info sseu;
Jeff McGee38732182015-02-13 10:27:54 -06004502
David Weinehall36cdd012016-08-22 13:59:31 +03004503 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004504 return -ENODEV;
4505
4506 seq_puts(m, "SSEU Device Info\n");
Imre Deak615d8902016-08-31 19:13:03 +03004507 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004508
Jeff McGee7f992ab2015-02-13 10:27:55 -06004509 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004510 memset(&sseu, 0, sizeof(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004511 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4512 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4513 sseu.max_eus_per_subslice =
4514 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
David Weinehall238010e2016-08-01 17:33:27 +03004515
4516 intel_runtime_pm_get(dev_priv);
4517
David Weinehall36cdd012016-08-22 13:59:31 +03004518 if (IS_CHERRYVIEW(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004519 cherryview_sseu_device_status(dev_priv, &sseu);
David Weinehall36cdd012016-08-22 13:59:31 +03004520 } else if (IS_BROADWELL(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004521 broadwell_sseu_device_status(dev_priv, &sseu);
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004522 } else if (IS_GEN9(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004523 gen9_sseu_device_status(dev_priv, &sseu);
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004524 } else if (INTEL_GEN(dev_priv) >= 10) {
4525 gen10_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004526 }
David Weinehall238010e2016-08-01 17:33:27 +03004527
4528 intel_runtime_pm_put(dev_priv);
4529
Imre Deak615d8902016-08-31 19:13:03 +03004530 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004531
Jeff McGee38732182015-02-13 10:27:54 -06004532 return 0;
4533}
4534
Ben Widawsky6d794d42011-04-25 11:25:56 -07004535static int i915_forcewake_open(struct inode *inode, struct file *file)
4536{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004537 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004538
Chris Wilsond7a133d2017-09-07 14:44:41 +01004539 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004540 return 0;
4541
Chris Wilsond7a133d2017-09-07 14:44:41 +01004542 intel_runtime_pm_get(i915);
4543 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004544
4545 return 0;
4546}
4547
Ben Widawskyc43b5632012-04-16 14:07:40 -07004548static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004549{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004550 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004551
Chris Wilsond7a133d2017-09-07 14:44:41 +01004552 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004553 return 0;
4554
Chris Wilsond7a133d2017-09-07 14:44:41 +01004555 intel_uncore_forcewake_user_put(i915);
4556 intel_runtime_pm_put(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004557
4558 return 0;
4559}
4560
/* Hold forcewake (and runtime pm) while the file is kept open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4566
Lyude317eaa92017-02-03 21:18:25 -05004567static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4568{
4569 struct drm_i915_private *dev_priv = m->private;
4570 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4571
4572 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4573 seq_printf(m, "Detected: %s\n",
4574 yesno(delayed_work_pending(&hotplug->reenable_work)));
4575
4576 return 0;
4577}
4578
4579static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4580 const char __user *ubuf, size_t len,
4581 loff_t *offp)
4582{
4583 struct seq_file *m = file->private_data;
4584 struct drm_i915_private *dev_priv = m->private;
4585 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4586 unsigned int new_threshold;
4587 int i;
4588 char *newline;
4589 char tmp[16];
4590
4591 if (len >= sizeof(tmp))
4592 return -EINVAL;
4593
4594 if (copy_from_user(tmp, ubuf, len))
4595 return -EFAULT;
4596
4597 tmp[len] = '\0';
4598
4599 /* Strip newline, if any */
4600 newline = strchr(tmp, '\n');
4601 if (newline)
4602 *newline = '\0';
4603
4604 if (strcmp(tmp, "reset") == 0)
4605 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4606 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4607 return -EINVAL;
4608
4609 if (new_threshold > 0)
4610 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4611 new_threshold);
4612 else
4613 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4614
4615 spin_lock_irq(&dev_priv->irq_lock);
4616 hotplug->hpd_storm_threshold = new_threshold;
4617 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4618 for_each_hpd_pin(i)
4619 hotplug->stats[i].count = 0;
4620 spin_unlock_irq(&dev_priv->irq_lock);
4621
4622 /* Re-enable hpd immediately if we were in an irq storm */
4623 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4624
4625 return len;
4626}
4627
/* seq_file boilerplate: bind i915_hpd_storm_ctl_show to the open file. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4632
/* Read shows the storm threshold/state, write updates the threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4641
C, Ramalingam35954e82017-11-08 00:08:23 +05304642static int i915_drrs_ctl_set(void *data, u64 val)
4643{
4644 struct drm_i915_private *dev_priv = data;
4645 struct drm_device *dev = &dev_priv->drm;
4646 struct intel_crtc *intel_crtc;
4647 struct intel_encoder *encoder;
4648 struct intel_dp *intel_dp;
4649
4650 if (INTEL_GEN(dev_priv) < 7)
4651 return -ENODEV;
4652
4653 drm_modeset_lock_all(dev);
4654 for_each_intel_crtc(dev, intel_crtc) {
4655 if (!intel_crtc->base.state->active ||
4656 !intel_crtc->config->has_drrs)
4657 continue;
4658
4659 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4660 if (encoder->type != INTEL_OUTPUT_EDP)
4661 continue;
4662
4663 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4664 val ? "en" : "dis", val);
4665
4666 intel_dp = enc_to_intel_dp(&encoder->base);
4667 if (val)
4668 intel_edp_drrs_enable(intel_dp,
4669 intel_crtc->config);
4670 else
4671 intel_edp_drrs_disable(intel_dp,
4672 intel_crtc->config);
4673 }
4674 }
4675 drm_modeset_unlock_all(dev);
4676
4677 return 0;
4678}
4679
4680DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4681
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004682static ssize_t
4683i915_fifo_underrun_reset_write(struct file *filp,
4684 const char __user *ubuf,
4685 size_t cnt, loff_t *ppos)
4686{
4687 struct drm_i915_private *dev_priv = filp->private_data;
4688 struct intel_crtc *intel_crtc;
4689 struct drm_device *dev = &dev_priv->drm;
4690 int ret;
4691 bool reset;
4692
4693 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4694 if (ret)
4695 return ret;
4696
4697 if (!reset)
4698 return cnt;
4699
4700 for_each_intel_crtc(dev, intel_crtc) {
4701 struct drm_crtc_commit *commit;
4702 struct intel_crtc_state *crtc_state;
4703
4704 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4705 if (ret)
4706 return ret;
4707
4708 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4709 commit = crtc_state->base.commit;
4710 if (commit) {
4711 ret = wait_for_completion_interruptible(&commit->hw_done);
4712 if (!ret)
4713 ret = wait_for_completion_interruptible(&commit->flip_done);
4714 }
4715
4716 if (!ret && crtc_state->base.active) {
4717 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4718 pipe_name(intel_crtc->pipe));
4719
4720 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4721 }
4722
4723 drm_modeset_unlock(&intel_crtc->base.mutex);
4724
4725 if (ret)
4726 return ret;
4727 }
4728
4729 ret = intel_fbc_reset_underrun(dev_priv);
4730 if (ret)
4731 return ret;
4732
4733 return cnt;
4734}
4735
/* Write-only knob: "echo 1" re-arms FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4742
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004743static const struct drm_info_list i915_debugfs_list[] = {
Chris Wilson311bd682011-01-13 19:06:50 +00004744 {"i915_capabilities", i915_capabilities, 0},
Chris Wilson73aa8082010-09-30 11:46:12 +01004745 {"i915_gem_objects", i915_gem_object_info, 0},
Chris Wilson08c18322011-01-10 00:00:24 +00004746 {"i915_gem_gtt", i915_gem_gtt_info, 0},
Chris Wilson6d2b88852013-08-07 18:30:54 +01004747 {"i915_gem_stolen", i915_gem_stolen_list_info },
Chris Wilsona6172a82009-02-11 14:26:38 +00004748 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004749 {"i915_gem_interrupt", i915_interrupt_info, 0},
Brad Volkin493018d2014-12-11 12:13:08 -08004750 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
Dave Gordon8b417c22015-08-12 15:43:44 +01004751 {"i915_guc_info", i915_guc_info, 0},
Alex Daifdf5d352015-08-12 15:43:37 +01004752 {"i915_guc_load_status", i915_guc_load_status_info, 0},
Alex Dai4c7e77f2015-08-12 15:43:40 +01004753 {"i915_guc_log_dump", i915_guc_log_dump, 0},
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07004754 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
Oscar Mateoa8b93702017-05-10 15:04:51 +00004755 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08004756 {"i915_huc_load_status", i915_huc_load_status_info, 0},
Deepak Sadb4bd12014-03-31 11:30:02 +05304757 {"i915_frequency_info", i915_frequency_info, 0},
Chris Wilsonf6544492015-01-26 18:03:04 +02004758 {"i915_hangcheck_info", i915_hangcheck_info, 0},
Michel Thierry061d06a2017-06-20 10:57:49 +01004759 {"i915_reset_info", i915_reset_info, 0},
Jesse Barnesf97108d2010-01-29 11:27:07 -08004760 {"i915_drpc_info", i915_drpc_info, 0},
Jesse Barnes7648fa92010-05-20 14:28:11 -07004761 {"i915_emon_status", i915_emon_status, 0},
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07004762 {"i915_ring_freq_table", i915_ring_freq_table, 0},
Daniel Vetter9a851782015-06-18 10:30:22 +02004763 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
Jesse Barnesb5e50c32010-02-05 12:42:41 -08004764 {"i915_fbc_status", i915_fbc_status, 0},
Paulo Zanoni92d44622013-05-31 16:33:24 -03004765 {"i915_ips_status", i915_ips_status, 0},
Jesse Barnes4a9bef32010-02-05 12:47:35 -08004766 {"i915_sr_status", i915_sr_status, 0},
Chris Wilson44834a62010-08-19 16:09:23 +01004767 {"i915_opregion", i915_opregion, 0},
Jani Nikulaada8f952015-12-15 13:17:12 +02004768 {"i915_vbt", i915_vbt, 0},
Chris Wilson37811fc2010-08-25 22:45:57 +01004769 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
Ben Widawskye76d3632011-03-19 18:14:29 -07004770 {"i915_context_status", i915_context_status, 0},
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02004771 {"i915_forcewake_domains", i915_forcewake_domains, 0},
Daniel Vetterea16a3c2011-12-14 13:57:16 +01004772 {"i915_swizzle_info", i915_swizzle_info, 0},
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01004773 {"i915_ppgtt_info", i915_ppgtt_info, 0},
Ben Widawsky63573eb2013-07-04 11:02:07 -07004774 {"i915_llc", i915_llc, 0},
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03004775 {"i915_edp_psr_status", i915_edp_psr_status, 0},
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004776 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
Jesse Barnesec013e72013-08-20 10:29:23 +01004777 {"i915_energy_uJ", i915_energy_uJ, 0},
Damien Lespiau6455c872015-06-04 18:23:57 +01004778 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
Imre Deak1da51582013-11-25 17:15:35 +02004779 {"i915_power_domain_info", i915_power_domain_info, 0},
Damien Lespiaub7cec662015-10-27 14:47:01 +02004780 {"i915_dmc_info", i915_dmc_info, 0},
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08004781 {"i915_display_info", i915_display_info, 0},
Chris Wilson1b365952016-10-04 21:11:31 +01004782 {"i915_engine_info", i915_engine_info, 0},
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00004783 {"i915_rcs_topology", i915_rcs_topology, 0},
Chris Wilsonc5418a82017-10-13 21:26:19 +01004784 {"i915_shrinker_info", i915_shrinker_info, 0},
Daniel Vetter728e29d2014-06-25 22:01:53 +03004785 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
Dave Airlie11bed952014-05-12 15:22:27 +10004786 {"i915_dp_mst_info", i915_dp_mst_info, 0},
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01004787 {"i915_wa_registers", i915_wa_registers, 0},
Damien Lespiauc5511e42014-11-04 17:06:51 +00004788 {"i915_ddb_info", i915_ddb_info, 0},
Jeff McGee38732182015-02-13 10:27:54 -06004789 {"i915_sseu_status", i915_sseu_status, 0},
Vandana Kannana54746e2015-03-03 20:53:10 +05304790 {"i915_drrs_status", i915_drrs_status, 0},
Chris Wilson1854d5c2015-04-07 16:20:32 +01004791 {"i915_rps_boost_info", i915_rps_boost_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004792};
Ben Gamari27c202a2009-07-01 22:26:52 -04004793#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004794
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004795static const struct i915_debugfs_files {
Daniel Vetter34b96742013-07-04 20:49:44 +02004796 const char *name;
4797 const struct file_operations *fops;
4798} i915_debugfs_files[] = {
4799 {"i915_wedged", &i915_wedged_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004800 {"i915_cache_sharing", &i915_cache_sharing_fops},
Chris Wilson094f9a52013-09-25 17:34:55 +01004801 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4802 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004803 {"i915_gem_drop_caches", &i915_drop_caches_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004804#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Daniel Vetter34b96742013-07-04 20:49:44 +02004805 {"i915_error_state", &i915_error_state_fops},
Chris Wilson5a4c6f12017-02-14 16:46:11 +00004806 {"i915_gpu_info", &i915_gpu_info_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004807#endif
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004808 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004809 {"i915_next_seqno", &i915_next_seqno_fops},
Damien Lespiaubd9db022013-10-15 18:55:36 +01004810 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
Ville Syrjälä369a1342014-01-22 14:36:08 +02004811 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4812 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4813 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
Ville Syrjälä4127dc42017-06-06 15:44:12 +03004814 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
Todd Previteeb3394fa2015-04-18 00:04:19 -07004815 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4816 {"i915_dp_test_type", &i915_displayport_test_type_fops},
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05304817 {"i915_dp_test_active", &i915_displayport_test_active_fops},
Michał Winiarski4977a282018-03-19 10:53:40 +01004818 {"i915_guc_log_level", &i915_guc_log_level_fops},
4819 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05304820 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
C, Ramalingam35954e82017-11-08 00:08:23 +05304821 {"i915_ipc_status", &i915_ipc_status_fops},
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07004822 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4823 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
Daniel Vetter34b96742013-07-04 20:49:44 +02004824};
4825
Chris Wilson1dac8912016-06-24 14:00:17 +01004826int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004827{
Chris Wilson91c8a322016-07-05 10:40:23 +01004828 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004829 struct dentry *ent;
Daniel Vetter34b96742013-07-04 20:49:44 +02004830 int ret, i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004831
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004832 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4833 minor->debugfs_root, to_i915(minor->dev),
4834 &i915_forcewake_fops);
4835 if (!ent)
4836 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004837
Tomeu Vizoso731035f2016-12-12 13:29:48 +01004838 ret = intel_pipe_crc_create(minor);
4839 if (ret)
4840 return ret;
Damien Lespiau07144422013-10-15 18:55:40 +01004841
Daniel Vetter34b96742013-07-04 20:49:44 +02004842 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004843 ent = debugfs_create_file(i915_debugfs_files[i].name,
4844 S_IRUGO | S_IWUSR,
4845 minor->debugfs_root,
4846 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004847 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004848 if (!ent)
4849 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004850 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004851
Ben Gamari27c202a2009-07-01 22:26:52 -04004852 return drm_debugfs_create_files(i915_debugfs_list,
4853 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004854 minor->debugfs_root, minor);
4855}
4856
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004857struct dpcd_block {
4858 /* DPCD dump start address. */
4859 unsigned int offset;
4860 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4861 unsigned int end;
4862 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4863 size_t size;
4864 /* Only valid for eDP. */
4865 bool edp;
4866};
4867
/*
 * DPCD ranges dumped by the i915_dpcd connector debugfs file.  Entries
 * marked .edp are skipped for non-eDP connectors.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4880
4881static int i915_dpcd_show(struct seq_file *m, void *data)
4882{
4883 struct drm_connector *connector = m->private;
4884 struct intel_dp *intel_dp =
4885 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4886 uint8_t buf[16];
4887 ssize_t err;
4888 int i;
4889
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004890 if (connector->status != connector_status_connected)
4891 return -ENODEV;
4892
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004893 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4894 const struct dpcd_block *b = &i915_dpcd_debug[i];
4895 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4896
4897 if (b->edp &&
4898 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4899 continue;
4900
4901 /* low tech for now */
4902 if (WARN_ON(size > sizeof(buf)))
4903 continue;
4904
4905 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4906 if (err <= 0) {
4907 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4908 size, b->offset, err);
4909 continue;
4910 }
4911
4912 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004913 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004914
4915 return 0;
4916}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004917DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004918
David Weinehallecbd6782016-08-23 12:23:56 +03004919static int i915_panel_show(struct seq_file *m, void *data)
4920{
4921 struct drm_connector *connector = m->private;
4922 struct intel_dp *intel_dp =
4923 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4924
4925 if (connector->status != connector_status_connected)
4926 return -ENODEV;
4927
4928 seq_printf(m, "Panel power up delay: %d\n",
4929 intel_dp->panel_power_up_delay);
4930 seq_printf(m, "Panel power down delay: %d\n",
4931 intel_dp->panel_power_down_delay);
4932 seq_printf(m, "Backlight on delay: %d\n",
4933 intel_dp->backlight_on_delay);
4934 seq_printf(m, "Backlight off delay: %d\n",
4935 intel_dp->backlight_off_delay);
4936
4937 return 0;
4938}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004939DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004940
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004941/**
4942 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4943 * @connector: pointer to a registered drm_connector
4944 *
4945 * Cleanup will be done by drm_connector_unregister() through a call to
4946 * drm_debugfs_connector_remove().
4947 *
4948 * Returns 0 on success, negative error codes on error.
4949 */
4950int i915_debugfs_connector_add(struct drm_connector *connector)
4951{
4952 struct dentry *root = connector->debugfs_entry;
4953
4954 /* The connector must have been registered beforehands. */
4955 if (!root)
4956 return -ENODEV;
4957
4958 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4959 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004960 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4961 connector, &i915_dpcd_fops);
4962
4963 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4964 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4965 connector, &i915_panel_fops);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004966
4967 return 0;
4968}