/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

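/*
 * The sysfs attributes below hang off the DRM minor's device
 * (dev_priv->drm.primary->kdev), whose drvdata is the drm_minor itself,
 * so every show/store callback recovers the i915 private data via this
 * helper.
 */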
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	return intel_rc6_residency(dev_priv, reg);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif
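
/*
 * The RC6 residency attributes above are merged into the device's standard
 * "power" group (power_group_name), so on a typical system they appear as,
 * e.g., /sys/class/drm/card0/power/rc6_residency_ms (the card index is
 * illustrative and depends on the machine).
 */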

static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
{
	if (!HAS_L3_DPF(dev_priv))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

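/*
 * l3_parity and l3_parity_slice_1 are binary sysfs files exposing the
 * per-slice L3 remap tables: reads return the stored remap information (or
 * zeroes if none has been written), while writes are cached and applied
 * lazily by flagging every context so the remap is programmed at the next
 * context switch.
 */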
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(dev_priv))
		return -ENXIO;

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

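/*
 * RPS (GT frequency) attributes, all in MHz: gt_act_freq_mhz reads back the
 * frequency the hardware is actually running at, gt_cur_freq_mhz is the last
 * requested frequency, gt_boost_freq_mhz is the frequency used when boosting,
 * and gt_min/max_freq_mhz are the user-adjustable soft limits.
 * intel_gpu_freq() and intel_freq_opcode() convert between MHz and the
 * hardware's own frequency encoding.
 */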
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->rps.hw_lock);
	dev_priv->rps.boost_freq = val;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	ret = intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return ret ?: count;
}
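
/*
 * Example interaction from userspace (paths and values are illustrative and
 * depend on the system):
 *
 *   $ cat /sys/class/drm/card0/gt_max_freq_mhz
 *   $ echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
 *
 * Writes are parsed with kstrtou32() and rejected with -EINVAL if they fall
 * outside the hardware limits or cross the opposite soft limit.
 */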

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
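
/*
 * On Valleyview/Cherryview the vlv_attrs set is registered instead of
 * gen6_attrs; it carries the same files plus vlv_rpe_freq_mhz, which reports
 * the platform's efficient (RPe) frequency.
 */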

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_i915_error_state_buf error_str;
	struct i915_gpu_state *gpu;
	ssize_t ret;

	ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
	if (ret)
		return ret;

	gpu = i915_first_error_state(dev_priv);
	ret = i915_error_state_to_str(&error_str, gpu);
	if (ret)
		goto out;

	ret = count < error_str.bytes ? count : error_str.bytes;
	memcpy(buf, error_str.buf, ret);

out:
	i915_gpu_state_put(gpu);
	i915_error_state_buf_release(&error_str);

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
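
/*
 * The "error" node lets userspace capture the most recent GPU error state
 * and clear it with any write, e.g. (path illustrative):
 *
 *   $ cat /sys/class/drm/card0/error > gpu_error.txt
 *   $ echo 1 > /sys/class/drm/card0/error
 */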

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif

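/*
 * i915_setup_sysfs - register the i915 sysfs interface on the primary minor.
 * Failures to create individual files are only logged via DRM_ERROR, so a
 * partially populated sysfs tree is not fatal.
 */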
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}

void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}