blob: cb1a804bf72e5836e8bbfbfcc2db8d644bc7ce2b [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
/* Map a debugfs info node back to the i915 device that owns it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
/*
 * i915_capabilities - debugfs dump of static device capabilities
 *
 * Prints the GPU generation, platform name and PCH type, then the
 * device-info flags, runtime info, driver caps and current module
 * parameters via a drm_printer bound to the seq_file.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050060
/* '*' if the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}
65
/* 'p' if the object is pinned globally (e.g. for display), ' ' otherwise. */
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}
70
Imre Deaka7363de2016-05-12 16:18:52 +030071static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000072{
Chris Wilson3e510a82016-08-05 10:14:23 +010073 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040074 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010075 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040078 }
Chris Wilsona6172a82009-02-11 14:26:38 +000079}
80
/*
 * 'g' if the object has outstanding userspace GTT mmap faults
 * (userfault_count != 0), ' ' otherwise.
 */
static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}
85
/* 'M' if the object's pages are kernel-mapped (vmap/kmap), ' ' otherwise. */
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
90
/*
 * Sum the drm_mm node sizes of every GGTT VMA of @obj that is actually
 * bound (has an allocated node). Caller must hold struct_mutex.
 */
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
103
Matthew Auld7393b7e2017-10-06 23:18:28 +0100104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * describe_obj - print a one-line (plus annotations) description of a
 * GEM object into the seq_file.
 *
 * Emits flag characters, size, read/write domains, cache level and
 * dirtiness, then per-VMA binding details (offset, size, page sizes,
 * GGTT view, fence), stolen-memory offset, last-write engine and
 * frontbuffer bits. Caller must hold struct_mutex.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Header: pointer, flag chars, size in KiB, domains, cache state. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe each VMA that is actually bound into an address space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings also carry a view type. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
Chris Wilsone637d2c2017-03-16 13:19:57 +0000222static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100223{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100228
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234}
235
/*
 * i915_gem_stolen_list_info - debugfs listing of objects backed by
 * stolen memory, sorted by their stolen-area start offset.
 *
 * Snapshots candidate objects under mm.obj_lock into a kvmalloc'd
 * array (sized from a racy read of object_count, hence the "count ==
 * total" early exits), sorts, then describes each one.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Racy upper bound on how many objects we may need to record. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects contribute size but no GTT footprint. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/* Per-client GEM object statistics accumulated by per_file_stats(). */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owning client, for ppgtt match */
	unsigned long count;		/* number of objects seen */
	u64 total, unbound;		/* total bytes; bytes with no binding */
	u64 global, shared;		/* GGTT-bound bytes; exported/named bytes */
	u64 active, inactive;		/* bound bytes split by GPU activity */
};
306
/*
 * per_file_stats - idr_for_each() callback accumulating one object's
 * footprint into a struct file_stats (@data). @ptr is the GEM object.
 * Caller must hold struct_mutex. Always returns 0 (continue iteration).
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt VMAs belonging to this client. */
			if (ppgtt->base.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit one summary line for a struct file_stats, skipping empty sets.
 * NOTE: evaluates @stats multiple times — call with a plain variable.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800356
/*
 * Aggregate and print statistics for all objects held in every
 * engine's batch-pool caches. Caller must hold struct_mutex.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
379
/*
 * per_file_ctx_stats - idr_for_each() callback folding a context's
 * per-engine state and ring objects into a struct file_stats (@data).
 */
static int per_file_ctx_stats(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	int n;

	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
		if (ctx->engine[n].state)
			per_file_stats(0, ctx->engine[n].state->obj, data);
		if (ctx->engine[n].ring)
			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
	}

	return 0;
}
394
/*
 * Sum the footprint of every GEM context (kernel context plus each
 * client's contexts) and print one "[k]contexts" summary line.
 * Caller must hold filelist_mutex; struct_mutex is taken here.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
416
/*
 * i915_gem_object_info - debugfs summary of all GEM objects.
 *
 * First pass (under mm.obj_lock): totals for unbound and bound object
 * lists, broken down by purgeable / mapped / huge-page / display use.
 * Second pass (under filelist_mutex): per-client statistics, resolving
 * each client to a task name where possible.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	/* Objects not bound into any GTT. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	/* Objects with at least one GTT binding. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->base.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		/* struct_mutex guards the object idr walk per client. */
		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->ctx->pid ?
				request->ctx->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
554
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100555static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000556{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100557 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300558 struct drm_i915_private *dev_priv = node_to_i915(node);
559 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100560 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000561 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300562 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100563 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000564 int count, ret;
565
Chris Wilsonf2123812017-10-16 12:40:37 +0100566 nobject = READ_ONCE(dev_priv->mm.object_count);
567 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
568 if (!objects)
569 return -ENOMEM;
570
Chris Wilson08c18322011-01-10 00:00:24 +0000571 ret = mutex_lock_interruptible(&dev->struct_mutex);
572 if (ret)
573 return ret;
574
Chris Wilsonf2123812017-10-16 12:40:37 +0100575 count = 0;
576 spin_lock(&dev_priv->mm.obj_lock);
577 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
578 objects[count++] = obj;
579 if (count == nobject)
580 break;
581 }
582 spin_unlock(&dev_priv->mm.obj_lock);
583
584 total_obj_size = total_gtt_size = 0;
585 for (n = 0; n < count; n++) {
586 obj = objects[n];
587
Damien Lespiau267f0c92013-06-24 22:59:48 +0100588 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000589 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100590 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000591 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100592 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000593 }
594
595 mutex_unlock(&dev->struct_mutex);
596
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300597 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000598 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100599 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000600
601 return 0;
602}
603
/*
 * i915_gem_batch_pool_info - debugfs dump of the per-engine batch-buffer
 * pools: object count per cache bucket, then a description of each
 * object, followed by a grand total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk: just count the bucket. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second walk: describe every object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
648
/*
 * Print the gen8+ display-engine interrupt registers (per-pipe IMR/IIR/IER,
 * port, misc and PCU mask/identity/enable) to the seq_file.
 *
 * Per-pipe registers live in the pipe's power well, so each pipe is guarded
 * with intel_display_power_get_if_enabled(): a powered-down pipe is reported
 * as such and its registers are not touched (reading them would be unsafe).
 * Shared by the gen8 and gen11 branches of i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
698
/*
 * debugfs dump of the interrupt registers for the running platform.
 *
 * Selects one of six mutually-exclusive register layouts (Cherryview,
 * gen11+, gen8+, Valleyview, pre-PCH-split, PCH-split) and prints the
 * relevant IMR/IIR/IER (and platform-specific) registers, then appends
 * the per-engine interrupt masks for gen6+.
 *
 * The whole dump is bracketed by intel_runtime_pm_get/put so the device
 * is awake for the register reads; per-pipe reads are additionally guarded
 * by display power domains because those registers sit in pipe power wells.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* PIPESTAT lives in the pipe power well; skip if off */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		/* hotplug/flip/GTT registers need the INIT power domain here */
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* same pipe power-well guard as the CHV branch above */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	/* engine interrupt masks, printed after the platform dump above */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
911
Chris Wilsona6172a82009-02-11 14:26:38 +0000912static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
913{
David Weinehall36cdd012016-08-22 13:59:31 +0300914 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100916 int i, ret;
917
918 ret = mutex_lock_interruptible(&dev->struct_mutex);
919 if (ret)
920 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000921
Chris Wilsona6172a82009-02-11 14:26:38 +0000922 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
923 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100924 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000925
Chris Wilson6c085a72012-08-20 11:40:46 +0200926 seq_printf(m, "Fence %d, pin count = %d, object = ",
927 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100928 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100929 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100930 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100931 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100932 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000933 }
934
Chris Wilson05394f32010-11-08 19:18:58 +0000935 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000936 return 0;
937}
938
Chris Wilson98a2f412016-10-12 10:05:18 +0100939#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * ->read handler shared by the error-state and gpu-info debugfs files.
 *
 * Formats the captured GPU state (stashed in file->private_data by the
 * respective ->open) into a freshly initialised error-state buffer and
 * copies up to @count bytes of it to userspace, advancing *pos.
 * Returns 0 (EOF) immediately when no state has been captured.
 *
 * The buffer is (re)built on every read, windowed at *pos via
 * i915_error_state_buf_init(); simple_read_from_buffer() is given a
 * zeroed local offset because str.buf already starts at the requested
 * position, and *pos is then advanced from str.start by the amount copied.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error = file->private_data;
	struct drm_i915_error_state_buf str;
	ssize_t ret;
	loff_t tmp;

	if (!error)
		return 0;

	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&str, error);
	if (ret)
		goto out;

	/* str.buf is already positioned at *pos, so read from its start */
	tmp = 0;
	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
	if (ret < 0)
		goto out;

	*pos = str.start + ret;
out:
	i915_error_state_buf_release(&str);
	return ret;
}
969
970static int gpu_state_release(struct inode *inode, struct file *file)
971{
972 i915_gpu_state_put(file->private_data);
973 return 0;
974}
975
976static int i915_gpu_info_open(struct inode *inode, struct file *file)
977{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100978 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000979 struct i915_gpu_state *gpu;
980
Chris Wilson090e5fe2017-03-28 14:14:07 +0100981 intel_runtime_pm_get(i915);
982 gpu = i915_capture_gpu_state(i915);
983 intel_runtime_pm_put(i915);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000984 if (!gpu)
985 return -ENOMEM;
986
987 file->private_data = gpu;
988 return 0;
989}
990
/*
 * debugfs file that captures a fresh GPU-state snapshot at open and
 * streams its textual form to the reader; read-only (no ->write).
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100998
Daniel Vetterd5442302012-04-27 15:17:40 +0200999static ssize_t
1000i915_error_state_write(struct file *filp,
1001 const char __user *ubuf,
1002 size_t cnt,
1003 loff_t *ppos)
1004{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001005 struct i915_gpu_state *error = filp->private_data;
1006
1007 if (!error)
1008 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001009
1010 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001011 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001012
1013 return cnt;
1014}
1015
/*
 * ->open for the i915_error_state file: take a reference on the oldest
 * recorded error state (may be NULL if none) for gpu_state_read()/
 * i915_error_state_write() to use; released by gpu_state_release().
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}
1021
/*
 * debugfs file exposing the recorded GPU error state: readable as text
 * via the shared gpu_state_read(), and writable to clear the record.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001030#endif
1031
/*
 * Setter for the i915_next_seqno debugfs attribute: force the global
 * seqno to @val via i915_gem_set_global_seqno().
 *
 * Ordering matters: struct_mutex is taken (interruptibly) first, and the
 * runtime-PM reference is held only around the seqno update itself so the
 * hardware is awake while it is reprogrammed. Returns the result of the
 * seqno update, or the lock-acquisition error.
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
1051
/* Write-only attribute (no getter): accepts a hex seqno to force. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +02001055
/*
 * debugfs dump of the GPU frequency / RPS state for the running platform.
 *
 * Three platform families are handled: ILK (gen5) MEMSWCTL/MEMSTAT,
 * VLV/CHV punit-based frequencies, and gen6+ RPS registers; anything
 * older prints "no P-state info available". The CD-clock / dotclock
 * summary at the end is printed unconditionally.
 *
 * Bracketed by a runtime-PM reference; the gen6+ branch additionally
 * takes forcewake around the RPSTAT/threshold reads (those registers
 * live in the GT power well) and VLV/CHV takes pcu_lock around the
 * punit access.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts expose the caps at different offsets */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* the requested-frequency field moved/shrank across gens */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		/* PM interrupt registers moved into GT bank 2 on gen8+ */
		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);

		/* RPN/RP1/RP0 fields swap byte lanes on GEN9_LP parts */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1262
Ben Widawskyd6369512016-09-20 16:54:32 +03001263static void i915_instdone_info(struct drm_i915_private *dev_priv,
1264 struct seq_file *m,
1265 struct intel_instdone *instdone)
1266{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001267 int slice;
1268 int subslice;
1269
Ben Widawskyd6369512016-09-20 16:54:32 +03001270 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1271 instdone->instdone);
1272
1273 if (INTEL_GEN(dev_priv) <= 3)
1274 return;
1275
1276 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1277 instdone->slice_common);
1278
1279 if (INTEL_GEN(dev_priv) <= 6)
1280 return;
1281
Ben Widawskyf9e61372016-09-20 16:54:33 +03001282 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1283 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1284 slice, subslice, instdone->sampler[slice][subslice]);
1285
1286 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1287 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1288 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001289}
1290
/*
 * debugfs show function: dump the hangcheck state — global wedged/reset
 * flags, per-engine seqno/ACTHD snapshots compared against what hangcheck
 * last recorded, the breadcrumb waiter list, and (for the render engine)
 * the sampled vs accumulated INSTDONE registers.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the live hardware state under a runtime-pm wakeref */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* INSTDONE is only captured for the render engine */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   engine->timeline->inflight_seqnos);
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled));

		/* Walk the breadcrumb waiter tree under its irq-safe lock */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			/* "read" = just sampled above, "accu" = hangcheck's copy */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1385
Michel Thierry061d06a2017-06-20 10:57:49 +01001386static int i915_reset_info(struct seq_file *m, void *unused)
1387{
1388 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1389 struct i915_gpu_error *error = &dev_priv->gpu_error;
1390 struct intel_engine_cs *engine;
1391 enum intel_engine_id id;
1392
1393 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1394
1395 for_each_engine(engine, dev_priv, id) {
1396 seq_printf(m, "%s = %u\n", engine->name,
1397 i915_reset_engine_count(error, engine));
1398 }
1399
1400 return 0;
1401}
1402
/*
 * Report the Ironlake (gen5) dynamic render power state, decoded from
 * the MEMMODECTL, RSTDBYCTL and CRSTANDVID registers: boost frequency,
 * HW vs SW frequency control, P-state limits, render-standby VIDs and
 * the current RSx render-standby state.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	/* FMIN sits in the low bits, so no shift is needed */
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1459
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001460static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001461{
Chris Wilson233ebf52017-03-23 10:19:44 +00001462 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001463 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001464 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001465
Chris Wilsond7a133d2017-09-07 14:44:41 +01001466 seq_printf(m, "user.bypass_count = %u\n",
1467 i915->uncore.user_forcewake.count);
1468
Chris Wilson233ebf52017-03-23 10:19:44 +00001469 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001470 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001471 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001472 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001473
1474 return 0;
1475}
1476
Mika Kuoppala13628772017-03-15 17:43:02 +02001477static void print_rc6_res(struct seq_file *m,
1478 const char *title,
1479 const i915_reg_t reg)
1480{
1481 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482
1483 seq_printf(m, "%s %u (%llu us)\n",
1484 title, I915_READ(reg),
1485 intel_rc6_residency_us(dev_priv, reg));
1486}
1487
/*
 * Report Valleyview/Cherryview RC6 state: whether RC6 is enabled, the
 * render/media power-well status bits, and the RC6 residency counters,
 * finishing with the shared forcewake-domain dump.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Tail-call into the forcewake dump so both appear in one file */
	return i915_forcewake_domains(m, NULL);
}
1509
/*
 * Report gen6+ RC-state information: RC1e/RC6/RC6p/RC6pp enablement,
 * the current RC state decoded from GEN6_GT_CORE_STATUS, gen9 power
 * gating status, RC6 residency counters and (gen6/7) the RC6 voltage
 * IDs read from the pcode mailbox.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw read bypassing forcewake; trace it manually to keep logs honest */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7 */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs three 8-bit VIDs, lowest byte first */
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1597
1598static int i915_drpc_info(struct seq_file *m, void *unused)
1599{
David Weinehall36cdd012016-08-22 13:59:31 +03001600 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001601 int err;
1602
1603 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001604
David Weinehall36cdd012016-08-22 13:59:31 +03001605 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001606 err = vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001607 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001608 err = gen6_drpc_info(m);
Ben Widawsky4d855292011-12-12 19:34:16 -08001609 else
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001610 err = ironlake_drpc_info(m);
1611
1612 intel_runtime_pm_put(dev_priv);
1613
1614 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001615}
1616
Daniel Vetter9a851782015-06-18 10:30:22 +02001617static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1618{
David Weinehall36cdd012016-08-22 13:59:31 +03001619 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001620
1621 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1622 dev_priv->fb_tracking.busy_bits);
1623
1624 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1625 dev_priv->fb_tracking.flip_bits);
1626
1627 return 0;
1628}
1629
/*
 * debugfs show function: report whether FBC is active (and, if not, the
 * recorded no_fbc_reason), any scheduled FBC worker, and the per-gen
 * compression status bits, all under the FBC lock and a runtime-pm
 * wakeref.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (fbc->work.scheduled)
		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
			   fbc->work.scheduled_vblank,
			   drm_crtc_vblank_count(&fbc->crtc->base));

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compression-status register and mask vary by gen */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv);

	return 0;
}
1674
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001675static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001676{
David Weinehall36cdd012016-08-22 13:59:31 +03001677 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001678
David Weinehall36cdd012016-08-22 13:59:31 +03001679 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001680 return -ENODEV;
1681
Rodrigo Vivida46f932014-08-01 02:04:45 -07001682 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001683
1684 return 0;
1685}
1686
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001687static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001688{
David Weinehall36cdd012016-08-22 13:59:31 +03001689 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001690 u32 reg;
1691
David Weinehall36cdd012016-08-22 13:59:31 +03001692 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001693 return -ENODEV;
1694
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001695 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001696
1697 reg = I915_READ(ILK_DPFC_CONTROL);
1698 dev_priv->fbc.false_color = val;
1699
1700 I915_WRITE(ILK_DPFC_CONTROL, val ?
1701 (reg | FBC_CTL_FALSE_COLOR) :
1702 (reg & ~FBC_CTL_FALSE_COLOR));
1703
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001704 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001705 return 0;
1706}
1707
/* debugfs file ops for the FBC false-colour knob; value printed as "%llu\n" */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1711
/*
 * debugfs show function: report whether IPS (Intermediate Pixel Storage)
 * is allowed by the module parameter and, where readable (pre-gen8),
 * whether it is currently enabled in IPS_CTL.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		/* gen8+ has no CPU-readable IPS status register */
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
1737
/*
 * debugfs show function: report whether panel self-refresh is enabled,
 * probing the platform-specific status register. Takes POWER_DOMAIN_INIT
 * (and a runtime-pm wakeref) so the register reads do not fault on a
 * powered-down display.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1767
/*
 * debugfs show function (Ironlake/gen5 only): report the EMON-derived
 * GMCH temperature and the chipset/GFX/total power readings, sampled
 * under struct_mutex.
 *
 * Returns 0, -ENODEV on non-gen5, or a negative error if the mutex
 * wait is interrupted.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev_priv))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
1794
/*
 * debugfs show function (LLC platforms only): print the GPU-to-ring/CPU
 * frequency mapping table, querying the effective CPU and ring
 * frequencies for each GPU frequency step via the pcode mailbox.
 *
 * Returns 0, -ENODEV without LLC, or a negative error if the pcu_lock
 * wait is interrupted.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode overwrites ia_freq with CPU (low byte) / ring (high byte) */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1842
Chris Wilson44834a62010-08-19 16:09:23 +01001843static int i915_opregion(struct seq_file *m, void *unused)
1844{
David Weinehall36cdd012016-08-22 13:59:31 +03001845 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1846 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001847 struct intel_opregion *opregion = &dev_priv->opregion;
1848 int ret;
1849
1850 ret = mutex_lock_interruptible(&dev->struct_mutex);
1851 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001852 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001853
Jani Nikula2455a8e2015-12-14 12:50:53 +02001854 if (opregion->header)
1855 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001856
1857 mutex_unlock(&dev->struct_mutex);
1858
Daniel Vetter0d38f002012-04-21 22:49:10 +02001859out:
Chris Wilson44834a62010-08-19 16:09:23 +01001860 return 0;
1861}
1862
Jani Nikulaada8f952015-12-15 13:17:12 +02001863static int i915_vbt(struct seq_file *m, void *unused)
1864{
David Weinehall36cdd012016-08-22 13:59:31 +03001865 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001866
1867 if (opregion->vbt)
1868 seq_write(m, opregion->vbt, opregion->vbt_size);
1869
1870 return 0;
1871}
1872
/*
 * i915_gem_framebuffer_info - list all framebuffers and their backing objects
 *
 * Prints the fbdev/fbcon framebuffer first (when fbdev emulation is built
 * in), then every user-created framebuffer on the device, with geometry,
 * format, modifier, refcount and a describe_obj() dump of the GEM object.
 *
 * Locking: struct_mutex is taken interruptibly for describe_obj(); the
 * framebuffer list itself is protected by mode_config.fb_lock.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* Report the fbcon framebuffer separately, and remember it so the
	 * loop below can skip it instead of printing it twice.
	 */
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, fbdev_fb->obj);
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1922
Chris Wilson7e37f882016-08-02 22:50:21 +01001923static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001924{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001925 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1926 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001927}
1928
/*
 * i915_context_status - dump every logical context known to the driver
 *
 * For each context on dev_priv->contexts.list print its HW id, the owning
 * process (via ctx->pid), the remap-slice flag ('R'/'r'), and per-engine
 * the context state object and ringbuffer details.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	/* The context list and per-context state are guarded by struct_mutex */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* get_pid_task takes a task reference; drop it below */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* owning fd already closed; context being torn down */
			seq_puts(m, "(deleted) ");
		} else {
			/* no pid, valid file_priv: driver-internal context */
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce = &ctx->engine[engine->id];

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1980
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001981static const char *swizzle_string(unsigned swizzle)
1982{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001983 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001984 case I915_BIT_6_SWIZZLE_NONE:
1985 return "none";
1986 case I915_BIT_6_SWIZZLE_9:
1987 return "bit9";
1988 case I915_BIT_6_SWIZZLE_9_10:
1989 return "bit9/bit10";
1990 case I915_BIT_6_SWIZZLE_9_11:
1991 return "bit9/bit11";
1992 case I915_BIT_6_SWIZZLE_9_10_11:
1993 return "bit9/bit10/bit11";
1994 case I915_BIT_6_SWIZZLE_9_17:
1995 return "bit9/bit17";
1996 case I915_BIT_6_SWIZZLE_9_10_17:
1997 return "bit9/bit10/bit17";
1998 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001999 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002000 }
2001
2002 return "bug";
2003}
2004
/*
 * i915_swizzle_info - report bit6 swizzling state and the raw tiling
 * configuration registers it was derived from.
 *
 * Holds a runtime PM reference across the MMIO reads so the device is
 * awake while the registers are sampled.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* Gen3/4: DRAM channel/rank configuration registers */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: memory arbiter and DIMM configuration registers */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* The arbiter mode register moved on gen8+ */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2051
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002052static int per_file_ctx(int id, void *ptr, void *data)
2053{
Chris Wilsone2efd132016-05-24 14:53:34 +01002054 struct i915_gem_context *ctx = ptr;
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002055 struct seq_file *m = data;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002056 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2057
2058 if (!ppgtt) {
2059 seq_printf(m, " no ppgtt for context %d\n",
2060 ctx->user_handle);
2061 return 0;
2062 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002063
Oscar Mateof83d6512014-05-22 14:13:38 +01002064 if (i915_gem_context_is_default(ctx))
2065 seq_puts(m, " default context:\n");
2066 else
Oscar Mateo821d66d2014-07-03 16:28:00 +01002067 seq_printf(m, " context %d:\n", ctx->user_handle);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002068 ppgtt->debug_dump(ppgtt, m);
2069
2070 return 0;
2071}
2072
David Weinehall36cdd012016-08-22 13:59:31 +03002073static void gen8_ppgtt_info(struct seq_file *m,
2074 struct drm_i915_private *dev_priv)
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002075{
Ben Widawsky77df6772013-11-02 21:07:30 -07002076 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
Akash Goel3b3f1652016-10-13 22:44:48 +05302077 struct intel_engine_cs *engine;
2078 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002079 int i;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002080
Ben Widawsky77df6772013-11-02 21:07:30 -07002081 if (!ppgtt)
2082 return;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002083
Akash Goel3b3f1652016-10-13 22:44:48 +05302084 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002085 seq_printf(m, "%s\n", engine->name);
Ben Widawsky77df6772013-11-02 21:07:30 -07002086 for (i = 0; i < 4; i++) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002087 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
Ben Widawsky77df6772013-11-02 21:07:30 -07002088 pdp <<= 32;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002089 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
Ville Syrjäläa2a5b152014-03-31 18:17:16 +03002090 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
Ben Widawsky77df6772013-11-02 21:07:30 -07002091 }
2092 }
2093}
2094
/*
 * gen6_ppgtt_info - dump gen6/gen7 ppgtt related registers
 *
 * Prints GFX_MODE (global on gen6, per-engine on gen7), the per-engine
 * page-directory registers, the aliasing ppgtt state if present, and
 * ECOCHK.  Called from i915_ppgtt_info(), which holds struct_mutex and
 * a runtime PM reference around the call.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2127
/*
 * i915_ppgtt_info - dump ppgtt hardware state and per-client contexts
 *
 * Dumps the generation-appropriate ppgtt registers, then walks the DRM
 * file list and dumps every context's ppgtt via per_file_ctx().
 *
 * Locking order: filelist_mutex outer, struct_mutex inner (taken
 * interruptibly); a runtime PM reference is held across the register
 * reads.  Errors unwind through the goto labels in reverse order.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			/* owner exited while we were iterating */
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* dump every context this client created */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2169
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002170static int count_irq_waiters(struct drm_i915_private *i915)
2171{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002172 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302173 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002174 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002175
Akash Goel3b3f1652016-10-13 22:44:48 +05302176 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002177 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002178
2179 return count;
2180}
2181
Chris Wilson7466c292016-08-15 09:49:33 +01002182static const char *rps_power_to_str(unsigned int power)
2183{
2184 static const char * const strings[] = {
2185 [LOW_POWER] = "low power",
2186 [BETWEEN] = "mixed",
2187 [HIGH_POWER] = "high power",
2188 };
2189
2190 if (power >= ARRAY_SIZE(strings) || !strings[power])
2191 return "unknown";
2192
2193 return strings[power];
2194}
2195
/*
 * i915_rps_boost_info - dump RPS (render P-state) and boost statistics
 *
 * Prints the global RPS state and frequency limits, the per-client boost
 * counts for every open DRM file, and — when RPS is active with requests
 * in flight — the autotuning up/down evaluation-interval counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* filelist_mutex protects the client list; pid lookup under RCU */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* raw register reads; forcewake held explicitly around them */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2265
Ben Widawsky63573eb2013-07-04 11:02:07 -07002266static int i915_llc(struct seq_file *m, void *data)
2267{
David Weinehall36cdd012016-08-22 13:59:31 +03002268 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002269 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002270
David Weinehall36cdd012016-08-22 13:59:31 +03002271 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002272 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2273 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002274
2275 return 0;
2276}
2277
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002278static int i915_huc_load_status_info(struct seq_file *m, void *data)
2279{
2280 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002281 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002282
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002283 if (!HAS_HUC(dev_priv))
2284 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002285
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002286 p = drm_seq_file_printer(m);
2287 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002288
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302289 intel_runtime_pm_get(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002290 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302291 intel_runtime_pm_put(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002292
2293 return 0;
2294}
2295
Alex Daifdf5d352015-08-12 15:43:37 +01002296static int i915_guc_load_status_info(struct seq_file *m, void *data)
2297{
David Weinehall36cdd012016-08-22 13:59:31 +03002298 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002299 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002300 u32 tmp, i;
2301
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002302 if (!HAS_GUC(dev_priv))
2303 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002304
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002305 p = drm_seq_file_printer(m);
2306 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002307
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302308 intel_runtime_pm_get(dev_priv);
2309
Alex Daifdf5d352015-08-12 15:43:37 +01002310 tmp = I915_READ(GUC_STATUS);
2311
2312 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2313 seq_printf(m, "\tBootrom status = 0x%x\n",
2314 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2315 seq_printf(m, "\tuKernel status = 0x%x\n",
2316 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2317 seq_printf(m, "\tMIA Core status = 0x%x\n",
2318 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2319 seq_puts(m, "\nScratch registers:\n");
2320 for (i = 0; i < 16; i++)
2321 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2322
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302323 intel_runtime_pm_put(dev_priv);
2324
Alex Daifdf5d352015-08-12 15:43:37 +01002325 return 0;
2326}
2327
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002328static const char *
2329stringify_guc_log_type(enum guc_log_buffer_type type)
2330{
2331 switch (type) {
2332 case GUC_ISR_LOG_BUFFER:
2333 return "ISR";
2334 case GUC_DPC_LOG_BUFFER:
2335 return "DPC";
2336 case GUC_CRASH_DUMP_LOG_BUFFER:
2337 return "CRASH";
2338 default:
2339 MISSING_CASE(type);
2340 }
2341
2342 return "";
2343}
2344
Akash Goel5aa1ee42016-10-12 21:54:36 +05302345static void i915_guc_log_info(struct seq_file *m,
2346 struct drm_i915_private *dev_priv)
2347{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002348 struct intel_guc_log *log = &dev_priv->guc.log;
2349 enum guc_log_buffer_type type;
2350
2351 if (!intel_guc_log_relay_enabled(log)) {
2352 seq_puts(m, "GuC log relay disabled\n");
2353 return;
2354 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302355
Michał Winiarskidb557992018-03-19 10:53:43 +01002356 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302357
Michał Winiarski6a96be22018-03-19 10:53:42 +01002358 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002359 log->relay.full_count);
2360
2361 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2362 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2363 stringify_guc_log_type(type),
2364 log->stats[type].flush,
2365 log->stats[type].sampled_overflow);
2366 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302367}
2368
Dave Gordon8b417c22015-08-12 15:43:44 +01002369static void i915_guc_client_info(struct seq_file *m,
2370 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302371 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002372{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002373 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002374 enum intel_engine_id id;
Dave Gordon8b417c22015-08-12 15:43:44 +01002375 uint64_t tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002376
Oscar Mateob09935a2017-03-22 10:39:53 -07002377 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2378 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002379 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2380 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002381
Akash Goel3b3f1652016-10-13 22:44:48 +05302382 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002383 u64 submissions = client->submissions[id];
2384 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002385 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002386 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002387 }
2388 seq_printf(m, "\tTotal: %llu\n", tot);
2389}
2390
/*
 * i915_guc_info - top level GuC state dump
 *
 * Always prints the log relay stats (when GuC is used at all); when GuC
 * submission is enabled, additionally dumps the doorbell bitmap and the
 * execbuf/preempt client state.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* submission-specific state below requires GuC submission */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* with submission enabled, the execbuf client must exist */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2422
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor
 *
 * Walks the stage descriptor pool, skipping inactive entries, and prints
 * each descriptor's identifiers, doorbell triggers, workqueue location
 * and the per-engine execlist context (LRC) details for the engines the
 * execbuf client uses.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* only active descriptors are interesting */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* per-engine logical ring context backing this stage */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2476
/*
 * i915_guc_log_dump - hexdump a GuC log buffer object
 *
 * Dumps either the runtime GuC log (guc.log.vma) or, when the debugfs
 * node's info_ent->data is non-NULL, the error log captured at firmware
 * load failure.  Output is four u32 words per line.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* no log captured is not an error; just an empty dump */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2515
Michał Winiarski4977a282018-03-19 10:53:40 +01002516static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302517{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002518 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302519
Michał Winiarski86aa8242018-03-08 16:46:53 +01002520 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002521 return -ENODEV;
2522
Michał Winiarski4977a282018-03-19 10:53:40 +01002523 *val = intel_guc_log_level_get(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302524
2525 return 0;
2526}
2527
Michał Winiarski4977a282018-03-19 10:53:40 +01002528static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302529{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002530 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302531
Michał Winiarski86aa8242018-03-08 16:46:53 +01002532 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002533 return -ENODEV;
2534
Michał Winiarski4977a282018-03-19 10:53:40 +01002535 return intel_guc_log_level_set(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302536}
2537
/* debugfs attribute wiring the GuC log level get/set callbacks above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2541
Michał Winiarski4977a282018-03-19 10:53:40 +01002542static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2543{
2544 struct drm_i915_private *dev_priv = inode->i_private;
2545
2546 if (!USES_GUC(dev_priv))
2547 return -ENODEV;
2548
2549 file->private_data = &dev_priv->guc.log;
2550
2551 return intel_guc_log_relay_open(&dev_priv->guc.log);
2552}
2553
/*
 * Writing anything to the relay file just forces a flush of the GuC
 * log; the user data itself is never read.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	/* Claim the entire write was consumed. */
	return cnt;
}
2566
2567static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2568{
2569 struct drm_i915_private *dev_priv = inode->i_private;
2570
2571 intel_guc_log_relay_close(&dev_priv->guc.log);
2572
2573 return 0;
2574}
2575
/* debugfs file ops: open starts relaying, any write forces a flush. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2582
Chris Wilsonb86bef202017-01-16 13:06:21 +00002583static const char *psr2_live_status(u32 val)
2584{
2585 static const char * const live_status[] = {
2586 "IDLE",
2587 "CAPTURE",
2588 "CAPTURE_FS",
2589 "SLEEP",
2590 "BUFON_FW",
2591 "ML_UP",
2592 "SU_STANDBY",
2593 "FAST_SLEEP",
2594 "DEEP_SLEEP",
2595 "BUF_ON",
2596 "TG_ON"
2597 };
2598
2599 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2600 if (val < ARRAY_SIZE(live_status))
2601 return live_status[val];
2602
2603 return "unknown";
2604}
2605
José Roberto de Souzad0bc8622018-04-25 14:23:33 -07002606static const char *psr_sink_status(u8 val)
2607{
2608 static const char * const sink_status[] = {
2609 "inactive",
2610 "transition to active, capture and display",
2611 "active, display from RFB",
2612 "active, capture and display on sink device timings",
2613 "transition to inactive, capture and display, timing re-sync",
2614 "reserved",
2615 "reserved",
2616 "sink internal error"
2617 };
2618
2619 val &= DP_PSR_SINK_STATE_MASK;
2620 if (val < ARRAY_SIZE(sink_status))
2621 return sink_status[val];
2622
2623 return "unknown";
2624}
2625
/*
 * i915_edp_psr_status - report eDP Panel Self Refresh state
 *
 * Dumps the driver's PSR bookkeeping (enable state, busy frontbuffer
 * bits, pending re-enable work) followed by the hardware enable bit:
 * on DDI platforms that lives in EDP_PSR_CTL / EDP_PSR2_CTL, on
 * VLV/CHV it is tracked per pipe in VLV_PSRSTAT.  Optionally queries
 * the sink's own PSR state over DPCD.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	u32 stat[3];
	enum pipe pipe;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* Keep the device awake while reading PSR registers. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	if (HAS_DDI(dev_priv)) {
		/* PSR1 and PSR2 use distinct control registers. */
		if (dev_priv->psr.psr2_enabled)
			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
		else
			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
	} else {
		/*
		 * VLV/CHV track PSR per pipe; the transcoder's power
		 * domain must be up to read its status register.
		 * NOTE(review): stat[pipe] stays uninitialized for pipes
		 * whose power domain is off, yet is read again in the
		 * per-pipe printout below - confirm this is benign.
		 */
		for_each_pipe(dev_priv, pipe) {
			enum transcoder cpu_transcoder =
				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain))
				continue;

			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
				     VLV_EDP_PSR_CURR_STATE_MASK;
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				enabled = true;

			intel_display_power_put(dev_priv, power_domain);
		}
	}

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));

	/* On VLV/CHV also list which pipes currently have PSR active. */
	if (!HAS_DDI(dev_priv))
		for_each_pipe(dev_priv, pipe) {
			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
				seq_printf(m, " pipe %c", pipe_name(pipe));
		}
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no kind of performance counter
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	if (dev_priv->psr.psr2_enabled) {
		u32 psr2 = I915_READ(EDP_PSR2_STATUS);

		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
			   psr2, psr2_live_status(psr2));
	}

	if (dev_priv->psr.enabled) {
		struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
		u8 val;

		/* Query the sink's own PSR state over the DPCD AUX channel. */
		if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
			seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
				   psr_sink_status(val));
	}
	mutex_unlock(&dev_priv->psr.lock);

	/* Entry/exit timestamps are only tracked in PSR debug mode. */
	if (READ_ONCE(dev_priv->psr.debug)) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2728
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002729static int
2730i915_edp_psr_debug_set(void *data, u64 val)
2731{
2732 struct drm_i915_private *dev_priv = data;
2733
2734 if (!CAN_PSR(dev_priv))
2735 return -ENODEV;
2736
2737 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2738
2739 intel_runtime_pm_get(dev_priv);
2740 intel_psr_irq_control(dev_priv, !!val);
2741 intel_runtime_pm_put(dev_priv);
2742
2743 return 0;
2744}
2745
2746static int
2747i915_edp_psr_debug_get(void *data, u64 *val)
2748{
2749 struct drm_i915_private *dev_priv = data;
2750
2751 if (!CAN_PSR(dev_priv))
2752 return -ENODEV;
2753
2754 *val = READ_ONCE(dev_priv->psr.debug);
2755 return 0;
2756}
2757
/* debugfs attribute wiring the PSR debug get/set callbacks above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2761
/*
 * i915_sink_crc - read the CRC of the displayed frame from an eDP sink
 *
 * Walks the connector list for an active eDP connector, waits for any
 * pending commit on its CRTC, then asks the panel for its frame CRC
 * via intel_dp_sink_crc().  All modeset locks are taken through an
 * acquire context so lock-order inversions (-EDEADLK) can be handled
 * with the standard backoff-and-retry dance; every lock collected in
 * the context is released in one place at "out".
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		/* Only eDP sinks can report a CRC. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* -EDEADLK: drop all held locks, then retry the sequence. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	/* Releases every lock tracked in ctx, including continue paths. */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2841
/*
 * i915_energy_uJ - report cumulative energy consumption in microjoules
 *
 * Reads the RAPL energy-unit exponent from MSR_RAPL_POWER_UNIT
 * (bits 12:8, i.e. counter resolution of 1/2^units J) and scales the
 * raw counter from MCH_SECP_NRG_STTS into microjoules.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* power holds the MSR value first, then is reused for the counter. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2868
/*
 * i915_runtime_pm_status - report runtime power-management state
 *
 * Prints GT idleness, interrupt state, the runtime-PM usage count
 * (only available when the kernel is built with CONFIG_PM) and the
 * current PCI device power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Still dump the remaining fields even without runtime PM. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2893
Imre Deak1da51582013-11-25 17:15:35 +02002894static int i915_power_domain_info(struct seq_file *m, void *unused)
2895{
David Weinehall36cdd012016-08-22 13:59:31 +03002896 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002897 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2898 int i;
2899
2900 mutex_lock(&power_domains->lock);
2901
2902 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2903 for (i = 0; i < power_domains->power_well_count; i++) {
2904 struct i915_power_well *power_well;
2905 enum intel_display_power_domain power_domain;
2906
2907 power_well = &power_domains->power_wells[i];
2908 seq_printf(m, "%-25s %d\n", power_well->name,
2909 power_well->count);
2910
Joonas Lahtinen8385c2e2017-02-08 15:12:10 +02002911 for_each_power_domain(power_domain, power_well->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002912 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002913 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002914 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002915 }
2916
2917 mutex_unlock(&power_domains->lock);
2918
2919 return 0;
2920}
2921
/*
 * i915_dmc_info - report DMC (CSR) firmware status
 *
 * Shows whether the firmware payload is loaded, its path and version,
 * and - on platform/firmware combinations that expose them - the
 * DC-state transition counters, plus the CSR program registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake while reading CSR registers below. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload only the raw register dump below applies. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC-state counters exist only on these platform/fw revisions. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2963
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002964static void intel_seq_print_mode(struct seq_file *m, int tabs,
2965 struct drm_display_mode *mode)
2966{
2967 int i;
2968
2969 for (i = 0; i < tabs; i++)
2970 seq_putc(m, '\t');
2971
2972 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2973 mode->base.id, mode->name,
2974 mode->vrefresh, mode->clock,
2975 mode->hdisplay, mode->hsync_start,
2976 mode->hsync_end, mode->htotal,
2977 mode->vdisplay, mode->vsync_start,
2978 mode->vsync_end, mode->vtotal,
2979 mode->type, mode->flags);
2980}
2981
2982static void intel_encoder_info(struct seq_file *m,
2983 struct intel_crtc *intel_crtc,
2984 struct intel_encoder *intel_encoder)
2985{
David Weinehall36cdd012016-08-22 13:59:31 +03002986 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2987 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002988 struct drm_crtc *crtc = &intel_crtc->base;
2989 struct intel_connector *intel_connector;
2990 struct drm_encoder *encoder;
2991
2992 encoder = &intel_encoder->base;
2993 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002994 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002995 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2996 struct drm_connector *connector = &intel_connector->base;
2997 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2998 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002999 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003000 drm_get_connector_status_name(connector->status));
3001 if (connector->status == connector_status_connected) {
3002 struct drm_display_mode *mode = &crtc->mode;
3003 seq_printf(m, ", mode:\n");
3004 intel_seq_print_mode(m, 2, mode);
3005 } else {
3006 seq_putc(m, '\n');
3007 }
3008 }
3009}
3010
3011static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3012{
David Weinehall36cdd012016-08-22 13:59:31 +03003013 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3014 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003015 struct drm_crtc *crtc = &intel_crtc->base;
3016 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003017 struct drm_plane_state *plane_state = crtc->primary->state;
3018 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003019
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003020 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07003021 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003022 fb->base.id, plane_state->src_x >> 16,
3023 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07003024 else
3025 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003026 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3027 intel_encoder_info(m, intel_crtc, intel_encoder);
3028}
3029
3030static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3031{
3032 struct drm_display_mode *mode = panel->fixed_mode;
3033
3034 seq_printf(m, "\tfixed mode:\n");
3035 intel_seq_print_mode(m, 2, mode);
3036}
3037
3038static void intel_dp_info(struct seq_file *m,
3039 struct intel_connector *intel_connector)
3040{
3041 struct intel_encoder *intel_encoder = intel_connector->encoder;
3042 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3043
3044 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03003045 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003046 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003047 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03003048
3049 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3050 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003051}
3052
Libin Yang9a148a92016-11-28 20:07:05 +08003053static void intel_dp_mst_info(struct seq_file *m,
3054 struct intel_connector *intel_connector)
3055{
3056 struct intel_encoder *intel_encoder = intel_connector->encoder;
3057 struct intel_dp_mst_encoder *intel_mst =
3058 enc_to_mst(&intel_encoder->base);
3059 struct intel_digital_port *intel_dig_port = intel_mst->primary;
3060 struct intel_dp *intel_dp = &intel_dig_port->dp;
3061 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3062 intel_connector->port);
3063
3064 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3065}
3066
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003067static void intel_hdmi_info(struct seq_file *m,
3068 struct intel_connector *intel_connector)
3069{
3070 struct intel_encoder *intel_encoder = intel_connector->encoder;
3071 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3072
Jani Nikula742f4912015-09-03 11:16:09 +03003073 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003074}
3075
/* LVDS connector details: only the fixed panel mode is reported. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3081
3082static void intel_connector_info(struct seq_file *m,
3083 struct drm_connector *connector)
3084{
3085 struct intel_connector *intel_connector = to_intel_connector(connector);
3086 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08003087 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003088
3089 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03003090 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003091 drm_get_connector_status_name(connector->status));
3092 if (connector->status == connector_status_connected) {
3093 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3094 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3095 connector->display_info.width_mm,
3096 connector->display_info.height_mm);
3097 seq_printf(m, "\tsubpixel order: %s\n",
3098 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3099 seq_printf(m, "\tCEA rev: %d\n",
3100 connector->display_info.cea_rev);
3101 }
Maarten Lankhorstee648a72016-06-21 12:00:38 +02003102
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02003103 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02003104 return;
3105
3106 switch (connector->connector_type) {
3107 case DRM_MODE_CONNECTOR_DisplayPort:
3108 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08003109 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3110 intel_dp_mst_info(m, intel_connector);
3111 else
3112 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02003113 break;
3114 case DRM_MODE_CONNECTOR_LVDS:
3115 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10003116 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02003117 break;
3118 case DRM_MODE_CONNECTOR_HDMIA:
3119 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03003120 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02003121 intel_hdmi_info(m, intel_connector);
3122 break;
3123 default:
3124 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10003125 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003126
Jesse Barnesf103fc72014-02-20 12:39:57 -08003127 seq_printf(m, "\tmodes:\n");
3128 list_for_each_entry(mode, &connector->modes, head)
3129 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003130}
3131
/* Map a DRM plane type to the short tag used in the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3149
/*
 * Format a plane rotation/reflection bitmask as text.
 *
 * NOTE(review): returns a pointer into a static buffer, so the result
 * is overwritten by the next call and the function is not reentrant;
 * acceptable only because debugfs dumps are serialized - confirm no
 * concurrent callers exist.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3169
/*
 * Dump every plane attached to a CRTC: type, position/size on the
 * CRTC, source rectangle, pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/*
		 * src_* are 16.16 fixed point; (frac * 15625) >> 10 equals
		 * frac * 10^6 / 2^16, i.e. the fractional part in decimal.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3212
/*
 * Dump the CRTC's scaler state: how many scalers exist, which are in
 * use, and each scaler's mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3240
/*
 * debugfs: dump display state — every crtc (with cursor, scaler, plane and
 * underrun details for active ones) followed by every connector.
 *
 * A runtime PM reference is held across the whole dump so the helpers may
 * touch hardware registers.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Sample each crtc's state under its own modeset lock only. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	/* The connector list itself is protected by mode_config.mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}
3301
Chris Wilson1b365952016-10-04 21:11:31 +01003302static int i915_engine_info(struct seq_file *m, void *unused)
3303{
3304 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3305 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303306 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003307 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003308
Chris Wilson9c870d02016-10-24 13:42:15 +01003309 intel_runtime_pm_get(dev_priv);
3310
Chris Wilson6f561032018-01-24 11:36:07 +00003311 seq_printf(m, "GT awake? %s (epoch %u)\n",
3312 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003313 seq_printf(m, "Global active requests: %d\n",
3314 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003315 seq_printf(m, "CS timestamp frequency: %u kHz\n",
3316 dev_priv->info.cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003317
Chris Wilsonf636edb2017-10-09 12:02:57 +01003318 p = drm_seq_file_printer(m);
3319 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003320 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003321
Chris Wilson9c870d02016-10-24 13:42:15 +01003322 intel_runtime_pm_put(dev_priv);
3323
Chris Wilson1b365952016-10-04 21:11:31 +01003324 return 0;
3325}
3326
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003327static int i915_rcs_topology(struct seq_file *m, void *unused)
3328{
3329 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3330 struct drm_printer p = drm_seq_file_printer(m);
3331
3332 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3333
3334 return 0;
3335}
3336
Chris Wilsonc5418a82017-10-13 21:26:19 +01003337static int i915_shrinker_info(struct seq_file *m, void *unused)
3338{
3339 struct drm_i915_private *i915 = node_to_i915(m->private);
3340
3341 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3342 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3343
3344 return 0;
3345}
3346
/*
 * debugfs: dump software state and the last-read hardware state of every
 * shared DPLL.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* Lock out modeset so pll->state cannot change under us. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3373
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003374static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003375{
David Weinehall36cdd012016-08-22 13:59:31 +03003376 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Arun Siluvery33136b02016-01-21 21:43:47 +00003377 struct i915_workarounds *workarounds = &dev_priv->workarounds;
Chris Wilsonf4ecfbf2018-04-14 13:27:54 +01003378 int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003379
3380 intel_runtime_pm_get(dev_priv);
3381
Arun Siluvery33136b02016-01-21 21:43:47 +00003382 seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
Arun Siluvery33136b02016-01-21 21:43:47 +00003383 for (i = 0; i < workarounds->count; ++i) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003384 i915_reg_t addr;
3385 u32 mask, value, read;
Mika Kuoppala2fa60f62014-10-07 17:21:27 +03003386 bool ok;
Arun Siluvery888b5992014-08-26 14:44:51 +01003387
Arun Siluvery33136b02016-01-21 21:43:47 +00003388 addr = workarounds->reg[i].addr;
3389 mask = workarounds->reg[i].mask;
3390 value = workarounds->reg[i].value;
Mika Kuoppala2fa60f62014-10-07 17:21:27 +03003391 read = I915_READ(addr);
3392 ok = (value & mask) == (read & mask);
3393 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003394 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
Arun Siluvery888b5992014-08-26 14:44:51 +01003395 }
3396
3397 intel_runtime_pm_put(dev_priv);
Arun Siluvery888b5992014-08-26 14:44:51 +01003398
3399 return 0;
3400}
3401
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303402static int i915_ipc_status_show(struct seq_file *m, void *data)
3403{
3404 struct drm_i915_private *dev_priv = m->private;
3405
3406 seq_printf(m, "Isochronous Priority Control: %s\n",
3407 yesno(dev_priv->ipc_enabled));
3408 return 0;
3409}
3410
3411static int i915_ipc_status_open(struct inode *inode, struct file *file)
3412{
3413 struct drm_i915_private *dev_priv = inode->i_private;
3414
3415 if (!HAS_IPC(dev_priv))
3416 return -ENODEV;
3417
3418 return single_open(file, i915_ipc_status_show, dev_priv);
3419}
3420
3421static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3422 size_t len, loff_t *offp)
3423{
3424 struct seq_file *m = file->private_data;
3425 struct drm_i915_private *dev_priv = m->private;
3426 int ret;
3427 bool enable;
3428
3429 ret = kstrtobool_from_user(ubuf, len, &enable);
3430 if (ret < 0)
3431 return ret;
3432
3433 intel_runtime_pm_get(dev_priv);
3434 if (!dev_priv->ipc_enabled && enable)
3435 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3436 dev_priv->wm.distrust_bios_wm = true;
3437 dev_priv->ipc_enabled = enable;
3438 intel_enable_ipc(dev_priv);
3439 intel_runtime_pm_put(dev_priv);
3440
3441 return len;
3442}
3443
/* File operations for the i915_ipc_status debugfs entry (seq_file based). */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3452
Damien Lespiauc5511e42014-11-04 17:06:51 +00003453static int i915_ddb_info(struct seq_file *m, void *unused)
3454{
David Weinehall36cdd012016-08-22 13:59:31 +03003455 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3456 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003457 struct skl_ddb_allocation *ddb;
3458 struct skl_ddb_entry *entry;
3459 enum pipe pipe;
3460 int plane;
3461
David Weinehall36cdd012016-08-22 13:59:31 +03003462 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003463 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003464
Damien Lespiauc5511e42014-11-04 17:06:51 +00003465 drm_modeset_lock_all(dev);
3466
3467 ddb = &dev_priv->wm.skl_hw.ddb;
3468
3469 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3470
3471 for_each_pipe(dev_priv, pipe) {
3472 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3473
Matt Roper8b364b42016-10-26 15:51:28 -07003474 for_each_universal_plane(dev_priv, pipe, plane) {
Damien Lespiauc5511e42014-11-04 17:06:51 +00003475 entry = &ddb->plane[pipe][plane];
3476 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
3477 entry->start, entry->end,
3478 skl_ddb_entry_size(entry));
3479 }
3480
Matt Roper4969d332015-09-24 15:53:10 -07003481 entry = &ddb->plane[pipe][PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003482 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3483 entry->end, skl_ddb_entry_size(entry));
3484 }
3485
3486 drm_modeset_unlock_all(dev);
3487
3488 return 0;
3489}
3490
/*
 * debugfs helper: print DRRS (Display Refresh Rate Switching) state for one
 * crtc — the connectors it drives, the VBT-declared DRRS type, and, when
 * seamless DRRS is in use, the current refresh-rate state.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently attached to this crtc. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex guards drrs->dp and the refresh-rate state. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unexpected state: report it and bail out early. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3565
3566static int i915_drrs_status(struct seq_file *m, void *unused)
3567{
David Weinehall36cdd012016-08-22 13:59:31 +03003568 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3569 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303570 struct intel_crtc *intel_crtc;
3571 int active_crtc_cnt = 0;
3572
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003573 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303574 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003575 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303576 active_crtc_cnt++;
3577 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3578
3579 drrs_status_per_crtc(m, dev, intel_crtc);
3580 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303581 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003582 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303583
3584 if (!active_crtc_cnt)
3585 seq_puts(m, "No active crtc found\n");
3586
3587 return 0;
3588}
3589
Dave Airlie11bed952014-05-12 15:22:27 +10003590static int i915_dp_mst_info(struct seq_file *m, void *unused)
3591{
David Weinehall36cdd012016-08-22 13:59:31 +03003592 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3593 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003594 struct intel_encoder *intel_encoder;
3595 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003596 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003597 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003598
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003599 drm_connector_list_iter_begin(dev, &conn_iter);
3600 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003601 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003602 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003603
3604 intel_encoder = intel_attached_encoder(connector);
3605 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3606 continue;
3607
3608 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003609 if (!intel_dig_port->dp.can_mst)
3610 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003611
Jim Bride40ae80c2016-04-14 10:18:37 -07003612 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003613 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003614 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3615 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003616 drm_connector_list_iter_end(&conn_iter);
3617
Dave Airlie11bed952014-05-12 15:22:27 +10003618 return 0;
3619}
3620
/*
 * debugfs: arm/disarm DP compliance test handling from userspace.
 *
 * The written string is parsed as a decimal integer per connected DP
 * connector; only an exact value of 1 marks the test active, anything
 * else clears it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one allocation. */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Fake MST connectors are skipped; real port only. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	/* Buffer is freed on all paths; a parse error is propagated. */
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3679
3680static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3681{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003682 struct drm_i915_private *dev_priv = m->private;
3683 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003684 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003685 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003686 struct intel_dp *intel_dp;
3687
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003688 drm_connector_list_iter_begin(dev, &conn_iter);
3689 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003690 struct intel_encoder *encoder;
3691
Todd Previteeb3394fa2015-04-18 00:04:19 -07003692 if (connector->connector_type !=
3693 DRM_MODE_CONNECTOR_DisplayPort)
3694 continue;
3695
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003696 encoder = to_intel_encoder(connector->encoder);
3697 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3698 continue;
3699
3700 if (encoder && connector->status == connector_status_connected) {
3701 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003702 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003703 seq_puts(m, "1");
3704 else
3705 seq_puts(m, "0");
3706 } else
3707 seq_puts(m, "0");
3708 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003709 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003710
3711 return 0;
3712}
3713
3714static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003715 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003716{
David Weinehall36cdd012016-08-22 13:59:31 +03003717 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003718 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003719}
3720
/* File operations for the DP compliance test_active debugfs entry. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3729
3730static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3731{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003732 struct drm_i915_private *dev_priv = m->private;
3733 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003734 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003735 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003736 struct intel_dp *intel_dp;
3737
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003738 drm_connector_list_iter_begin(dev, &conn_iter);
3739 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003740 struct intel_encoder *encoder;
3741
Todd Previteeb3394fa2015-04-18 00:04:19 -07003742 if (connector->connector_type !=
3743 DRM_MODE_CONNECTOR_DisplayPort)
3744 continue;
3745
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003746 encoder = to_intel_encoder(connector->encoder);
3747 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3748 continue;
3749
3750 if (encoder && connector->status == connector_status_connected) {
3751 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navareb48a5ba2017-01-20 19:09:28 -08003752 if (intel_dp->compliance.test_type ==
3753 DP_TEST_LINK_EDID_READ)
3754 seq_printf(m, "%lx",
3755 intel_dp->compliance.test_data.edid);
Manasi Navare611032b2017-01-24 08:21:49 -08003756 else if (intel_dp->compliance.test_type ==
3757 DP_TEST_LINK_VIDEO_PATTERN) {
3758 seq_printf(m, "hdisplay: %d\n",
3759 intel_dp->compliance.test_data.hdisplay);
3760 seq_printf(m, "vdisplay: %d\n",
3761 intel_dp->compliance.test_data.vdisplay);
3762 seq_printf(m, "bpc: %u\n",
3763 intel_dp->compliance.test_data.bpc);
3764 }
Todd Previteeb3394fa2015-04-18 00:04:19 -07003765 } else
3766 seq_puts(m, "0");
3767 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003768 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003769
3770 return 0;
3771}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003772DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003773
3774static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3775{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003776 struct drm_i915_private *dev_priv = m->private;
3777 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003778 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003779 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003780 struct intel_dp *intel_dp;
3781
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003782 drm_connector_list_iter_begin(dev, &conn_iter);
3783 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003784 struct intel_encoder *encoder;
3785
Todd Previteeb3394fa2015-04-18 00:04:19 -07003786 if (connector->connector_type !=
3787 DRM_MODE_CONNECTOR_DisplayPort)
3788 continue;
3789
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003790 encoder = to_intel_encoder(connector->encoder);
3791 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3792 continue;
3793
3794 if (encoder && connector->status == connector_status_connected) {
3795 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003796 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003797 } else
3798 seq_puts(m, "0");
3799 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003800 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003801
3802 return 0;
3803}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003804DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003805
/*
 * debugfs helper: print one watermark latency table.
 *
 * The number of valid levels and the unit of the raw values are platform
 * dependent; everything is normalised to 0.1us steps for display.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* Print both the raw value and the decoded microseconds. */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3845
3846static int pri_wm_latency_show(struct seq_file *m, void *data)
3847{
David Weinehall36cdd012016-08-22 13:59:31 +03003848 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003849 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003850
David Weinehall36cdd012016-08-22 13:59:31 +03003851 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003852 latencies = dev_priv->wm.skl_latency;
3853 else
David Weinehall36cdd012016-08-22 13:59:31 +03003854 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003855
3856 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003857
3858 return 0;
3859}
3860
3861static int spr_wm_latency_show(struct seq_file *m, void *data)
3862{
David Weinehall36cdd012016-08-22 13:59:31 +03003863 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003864 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003865
David Weinehall36cdd012016-08-22 13:59:31 +03003866 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003867 latencies = dev_priv->wm.skl_latency;
3868 else
David Weinehall36cdd012016-08-22 13:59:31 +03003869 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003870
3871 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003872
3873 return 0;
3874}
3875
3876static int cur_wm_latency_show(struct seq_file *m, void *data)
3877{
David Weinehall36cdd012016-08-22 13:59:31 +03003878 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003879 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003880
David Weinehall36cdd012016-08-22 13:59:31 +03003881 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003882 latencies = dev_priv->wm.skl_latency;
3883 else
David Weinehall36cdd012016-08-22 13:59:31 +03003884 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003885
3886 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003887
3888 return 0;
3889}
3890
3891static int pri_wm_latency_open(struct inode *inode, struct file *file)
3892{
David Weinehall36cdd012016-08-22 13:59:31 +03003893 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003894
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003895 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003896 return -ENODEV;
3897
David Weinehall36cdd012016-08-22 13:59:31 +03003898 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003899}
3900
3901static int spr_wm_latency_open(struct inode *inode, struct file *file)
3902{
David Weinehall36cdd012016-08-22 13:59:31 +03003903 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003904
David Weinehall36cdd012016-08-22 13:59:31 +03003905 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003906 return -ENODEV;
3907
David Weinehall36cdd012016-08-22 13:59:31 +03003908 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003909}
3910
3911static int cur_wm_latency_open(struct inode *inode, struct file *file)
3912{
David Weinehall36cdd012016-08-22 13:59:31 +03003913 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003914
David Weinehall36cdd012016-08-22 13:59:31 +03003915 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003916 return -ENODEV;
3917
David Weinehall36cdd012016-08-22 13:59:31 +03003918 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003919}
3920
/*
 * Common debugfs write handler for the watermark latency files.
 *
 * Parses up to eight whitespace-separated uint16 latency values from
 * userspace and copies them into the platform latency table @wm while
 * holding all modeset locks. Exactly one value per supported WM level
 * must be supplied, otherwise -EINVAL is returned.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* the number of WM levels exposed differs per platform */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	/* NUL-terminate; len < sizeof(tmp) was checked above */
	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* require exactly one value per supported WM level */
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3965
3966
3967static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3968 size_t len, loff_t *offp)
3969{
3970 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003971 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003972 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003973
David Weinehall36cdd012016-08-22 13:59:31 +03003974 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003975 latencies = dev_priv->wm.skl_latency;
3976 else
David Weinehall36cdd012016-08-22 13:59:31 +03003977 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003978
3979 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003980}
3981
3982static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3983 size_t len, loff_t *offp)
3984{
3985 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003986 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003987 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003988
David Weinehall36cdd012016-08-22 13:59:31 +03003989 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003990 latencies = dev_priv->wm.skl_latency;
3991 else
David Weinehall36cdd012016-08-22 13:59:31 +03003992 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003993
3994 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003995}
3996
3997static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3998 size_t len, loff_t *offp)
3999{
4000 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03004001 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004002 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02004003
David Weinehall36cdd012016-08-22 13:59:31 +03004004 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00004005 latencies = dev_priv->wm.skl_latency;
4006 else
David Weinehall36cdd012016-08-22 13:59:31 +03004007 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004008
4009 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02004010}
4011
/* i915_pri_wm_latency: seq_file read plus custom latency-table write */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
4020
/* i915_spr_wm_latency: seq_file read plus custom latency-table write */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
4029
/* i915_cur_wm_latency: seq_file read plus custom latency-table write */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4038
Kees Cook647416f2013-03-10 14:10:06 -07004039static int
4040i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004041{
David Weinehall36cdd012016-08-22 13:59:31 +03004042 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004043
Chris Wilsond98c52c2016-04-13 17:35:05 +01004044 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004045
Kees Cook647416f2013-03-10 14:10:06 -07004046 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004047}
4048
/*
 * debugfs write: manually declare the engines in mask @val hung and
 * trigger error handling/reset, then wait for the reset handoff.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* mark each selected engine as stalled at its current seqno */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* block until the error handler has handed off the reset */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004085
/*
 * Helper for the missed/test irq debugfs files: wait for the GPU to go
 * idle under struct_mutex, store the new fault-injection mask in @irq,
 * then flush the idle worker so the interrupt state is disarmed.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* only update the mask once the GPU is idle */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4115
4116static int
Chris Wilson094f9a52013-09-25 17:34:55 +01004117i915_ring_missed_irq_get(void *data, u64 *val)
4118{
David Weinehall36cdd012016-08-22 13:59:31 +03004119 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004120
4121 *val = dev_priv->gpu_error.missed_irq_rings;
4122 return 0;
4123}
4124
/* debugfs write: set the missed-irq ring mask via the common fault helper */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4136
4137static int
4138i915_ring_test_irq_get(void *data, u64 *val)
4139{
David Weinehall36cdd012016-08-22 13:59:31 +03004140 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004141
4142 *val = dev_priv->gpu_error.test_irq_rings;
4143
4144 return 0;
4145}
4146
/* debugfs write: enable interrupt fault injection on the selected rings */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* only accept bits for engines that actually exist */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4161
/*
 * Bit flags accepted by the i915_gem_drop_caches debugfs file, selecting
 * which categories of GEM objects/caches to drop. DROP_ALL is the union
 * of every supported flag, and is what reads of the file advertise.
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE)
/* debugfs read: advertise the full set of supported DROP_* flags */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4183
/*
 * debugfs write: drop the GEM caches selected by the DROP_* bits in @val.
 * Active/retire processing runs under struct_mutex; the shrinker calls are
 * bracketed by fs_reclaim annotations so lockdep treats them as reclaim.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(dev_priv,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED);

		if (val & DROP_RETIRE)
			i915_retire_requests(dev_priv);

		mutex_unlock(&dev->struct_mutex);
	}

	/* tell lockdep we are effectively inside memory reclaim here */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(dev_priv);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE)
		drain_delayed_work(&dev_priv->gt.idle_work);

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004235
/* debugfs read: current MBC snoop/cache-sharing policy (gen6/gen7 only) */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* hold a runtime pm wakeref across the register read */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4255
/*
 * debugfs write: set the MBC snoop/cache-sharing policy (gen6/gen7 only).
 * Valid values are 0-3; performs a read-modify-write of GEN6_MBCUNIT_SNPCR.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	/* hold a runtime pm wakeref across the register update */
	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004284
/*
 * Fill @sseu with the currently powered-up slice/subslice/EU counts on
 * Cherryview, decoded from the CHV_POWER_SS*_SIG power-gating registers.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* each PG_ENABLE bit gates a pair of EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4317
/*
 * Fill @sseu with the currently powered-up slice/subslice/EU counts on
 * gen10, decoded from the per-slice PGCTL ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) and odd (SSB) subslices of a pair */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each ACK bit represents a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4373
/*
 * Fill @sseu with the currently powered-up slice/subslice/EU counts on
 * gen9, decoded from the per-slice PGCTL ACK registers.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) and odd (SSB) subslices of a pair */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* gen9 big-core reports the static subslice mask per slice */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			/* gen9 LP probes each subslice's ACK individually */
			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* each ACK bit represents a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4429
/*
 * Fill @sseu with the enabled slice/subslice/EU counts on Broadwell,
 * derived from GEN8_GT_SLICE_INFO plus the static device info (corrected
 * for fused-off EUs).
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4457
/*
 * Pretty-print an sseu_dev_info to the seq_file. @is_available_info
 * selects the "Available" (static device info) vs "Enabled" (runtime
 * status) labelling; capability flags are only printed for the former.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* the capability flags below only make sense for the static info */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4494
/*
 * debugfs: print the static SSEU device info followed by the runtime
 * power-gating status, using the platform-specific status reader.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	/* SSEU reporting only exists on gen8+ */
	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* copy the static limits; the status readers iterate up to these */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* hold a runtime pm wakeref while reading the status registers */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4531
/*
 * Opening i915_forcewake_user takes a runtime pm wakeref and a user
 * forcewake reference, keeping the GT awake while the file is open.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* forcewake only exists on gen6+; silently succeed elsewhere */
	if (INTEL_GEN(i915) < 6)
		return 0;

	/* the wakeref must be taken before grabbing forcewake */
	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);

	return 0;
}
4544
/* Drop the forcewake and runtime pm references taken at open time. */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* nothing was taken at open time on gen < 6 */
	if (INTEL_GEN(i915) < 6)
		return 0;

	/* release in reverse order of i915_forcewake_open() */
	intel_uncore_forcewake_user_put(i915);
	intel_runtime_pm_put(i915);

	return 0;
}
4557
/*
 * File operations for i915_forcewake_user: holding the file open keeps a
 * forcewake (and runtime pm) reference on gen6+ hardware.
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4563
Lyude317eaa92017-02-03 21:18:25 -05004564static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4565{
4566 struct drm_i915_private *dev_priv = m->private;
4567 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4568
4569 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4570 seq_printf(m, "Detected: %s\n",
4571 yesno(delayed_work_pending(&hotplug->reenable_work)));
4572
4573 return 0;
4574}
4575
4576static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4577 const char __user *ubuf, size_t len,
4578 loff_t *offp)
4579{
4580 struct seq_file *m = file->private_data;
4581 struct drm_i915_private *dev_priv = m->private;
4582 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4583 unsigned int new_threshold;
4584 int i;
4585 char *newline;
4586 char tmp[16];
4587
4588 if (len >= sizeof(tmp))
4589 return -EINVAL;
4590
4591 if (copy_from_user(tmp, ubuf, len))
4592 return -EFAULT;
4593
4594 tmp[len] = '\0';
4595
4596 /* Strip newline, if any */
4597 newline = strchr(tmp, '\n');
4598 if (newline)
4599 *newline = '\0';
4600
4601 if (strcmp(tmp, "reset") == 0)
4602 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4603 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4604 return -EINVAL;
4605
4606 if (new_threshold > 0)
4607 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4608 new_threshold);
4609 else
4610 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4611
4612 spin_lock_irq(&dev_priv->irq_lock);
4613 hotplug->hpd_storm_threshold = new_threshold;
4614 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4615 for_each_hpd_pin(i)
4616 hotplug->stats[i].count = 0;
4617 spin_unlock_irq(&dev_priv->irq_lock);
4618
4619 /* Re-enable hpd immediately if we were in an irq storm */
4620 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4621
4622 return len;
4623}
4624
/* Bind the seq_file show routine to the per-device private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4629
/* File operations for i915_hpd_storm_ctl: readable state, writable threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4638
C, Ramalingam35954e82017-11-08 00:08:23 +05304639static int i915_drrs_ctl_set(void *data, u64 val)
4640{
4641 struct drm_i915_private *dev_priv = data;
4642 struct drm_device *dev = &dev_priv->drm;
4643 struct intel_crtc *intel_crtc;
4644 struct intel_encoder *encoder;
4645 struct intel_dp *intel_dp;
4646
4647 if (INTEL_GEN(dev_priv) < 7)
4648 return -ENODEV;
4649
4650 drm_modeset_lock_all(dev);
4651 for_each_intel_crtc(dev, intel_crtc) {
4652 if (!intel_crtc->base.state->active ||
4653 !intel_crtc->config->has_drrs)
4654 continue;
4655
4656 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4657 if (encoder->type != INTEL_OUTPUT_EDP)
4658 continue;
4659
4660 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4661 val ? "en" : "dis", val);
4662
4663 intel_dp = enc_to_intel_dp(&encoder->base);
4664 if (val)
4665 intel_edp_drrs_enable(intel_dp,
4666 intel_crtc->config);
4667 else
4668 intel_edp_drrs_disable(intel_dp,
4669 intel_crtc->config);
4670 }
4671 }
4672 drm_modeset_unlock_all(dev);
4673
4674 return 0;
4675}
4676
4677DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4678
/*
 * Re-arm FIFO underrun reporting from userspace.
 *
 * Writing a "true" boolean walks every CRTC, waits for any in-flight
 * atomic commit on it to finish, re-arms underrun reporting on active
 * CRTCs and finally resets the FBC underrun state.  Writing "false" is
 * accepted but does nothing.  Returns @cnt on success, negative errno
 * on parse failure, interrupted wait, or lock contention.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		/* Per-CRTC lock; interruptible so userspace can bail out. */
		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for hw_done, then flip_done, of any pending commit. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Always unlock before propagating any wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4732
/* Write-only debugfs file used to re-arm FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4739
/*
 * Read-only informational debugfs entries, registered in one shot via
 * drm_debugfs_create_files() from i915_debugfs_register().
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* data bit selects the load-error log instead of the normal GuC log */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004791
/*
 * Writable/control debugfs entries with their own file_operations; each
 * is created individually in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	/* Error-state files exist only when error capture is compiled in. */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4822
Chris Wilson1dac8912016-06-24 14:00:17 +01004823int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004824{
Chris Wilson91c8a322016-07-05 10:40:23 +01004825 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004826 struct dentry *ent;
Daniel Vetter34b96742013-07-04 20:49:44 +02004827 int ret, i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004828
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004829 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4830 minor->debugfs_root, to_i915(minor->dev),
4831 &i915_forcewake_fops);
4832 if (!ent)
4833 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004834
Tomeu Vizoso731035f2016-12-12 13:29:48 +01004835 ret = intel_pipe_crc_create(minor);
4836 if (ret)
4837 return ret;
Damien Lespiau07144422013-10-15 18:55:40 +01004838
Daniel Vetter34b96742013-07-04 20:49:44 +02004839 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004840 ent = debugfs_create_file(i915_debugfs_files[i].name,
4841 S_IRUGO | S_IWUSR,
4842 minor->debugfs_root,
4843 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004844 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004845 if (!ent)
4846 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004847 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004848
Ben Gamari27c202a2009-07-01 22:26:52 -04004849 return drm_debugfs_create_files(i915_debugfs_list,
4850 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004851 minor->debugfs_root, minor);
4852}
4853
/* Describes one contiguous range of DPCD registers to dump via debugfs. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4864
/*
 * Table of DPCD ranges dumped by i915_dpcd_show().
 * NOTE(review): no entry sets .edp even though the struct supports it —
 * presumably the eDP-only ranges were meant to; verify intent.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4877
4878static int i915_dpcd_show(struct seq_file *m, void *data)
4879{
4880 struct drm_connector *connector = m->private;
4881 struct intel_dp *intel_dp =
4882 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4883 uint8_t buf[16];
4884 ssize_t err;
4885 int i;
4886
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004887 if (connector->status != connector_status_connected)
4888 return -ENODEV;
4889
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004890 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4891 const struct dpcd_block *b = &i915_dpcd_debug[i];
4892 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4893
4894 if (b->edp &&
4895 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4896 continue;
4897
4898 /* low tech for now */
4899 if (WARN_ON(size > sizeof(buf)))
4900 continue;
4901
4902 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4903 if (err <= 0) {
4904 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4905 size, b->offset, err);
4906 continue;
4907 }
4908
4909 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004910 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004911
4912 return 0;
4913}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004914DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004915
David Weinehallecbd6782016-08-23 12:23:56 +03004916static int i915_panel_show(struct seq_file *m, void *data)
4917{
4918 struct drm_connector *connector = m->private;
4919 struct intel_dp *intel_dp =
4920 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4921
4922 if (connector->status != connector_status_connected)
4923 return -ENODEV;
4924
4925 seq_printf(m, "Panel power up delay: %d\n",
4926 intel_dp->panel_power_up_delay);
4927 seq_printf(m, "Panel power down delay: %d\n",
4928 intel_dp->panel_power_down_delay);
4929 seq_printf(m, "Backlight on delay: %d\n",
4930 intel_dp->backlight_on_delay);
4931 seq_printf(m, "Backlight off delay: %d\n",
4932 intel_dp->backlight_off_delay);
4933
4934 return 0;
4935}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004936DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004937
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004938/**
4939 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4940 * @connector: pointer to a registered drm_connector
4941 *
4942 * Cleanup will be done by drm_connector_unregister() through a call to
4943 * drm_debugfs_connector_remove().
4944 *
4945 * Returns 0 on success, negative error codes on error.
4946 */
4947int i915_debugfs_connector_add(struct drm_connector *connector)
4948{
4949 struct dentry *root = connector->debugfs_entry;
4950
4951 /* The connector must have been registered beforehands. */
4952 if (!root)
4953 return -ENODEV;
4954
4955 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4956 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004957 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4958 connector, &i915_dpcd_fops);
4959
4960 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4961 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4962 connector, &i915_panel_fops);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004963
4964 return 0;
4965}