/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
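	/*
	 * The counter ticks once per 1.28us by default, so the fixed
	 * point conversion to ms is count * 128 / 100000.
	 */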
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

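		/*
		 * VLV_COUNT_RANGE_HIGH selects the coarse counter range
		 * where one tick covers 256 CZ cycles, hence the << 8.
		 */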
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

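/* rc6_enable reports the mask of enabled RC6 states, not a plain boolean. */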
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

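/*
 * Using power_group_name merges these attributes into the device's
 * standard power/ sysfs directory instead of creating a new group.
 */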
static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

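/*
 * The l3_parity files expose the per-slice L3 remap log used by the
 * dynamic parity feature (DPF), letting userspace read back and, with
 * care, reprogram which L3 rows have been remapped. Accesses must be
 * dword-aligned and fall within GEN7_L3LOG_SIZE.
 */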
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

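/* The target slice is smuggled to the read/write handlers via ->private. */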
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

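/*
 * gt_act_freq_mhz reports the frequency the GPU is actually running at
 * (read back from the Punit or RPSTAT1), while gt_cur_freq_mhz below
 * reports the frequency last requested by the driver.
 */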
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

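/*
 * RP0/RP1/RPn are the hardware-defined operating points: RP0 is the
 * maximum non-overclocked frequency (see the overclocking check in
 * gt_max_freq_mhz_store()), RP1 the base frequency and RPn the minimum.
 */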
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

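/*
 * All of the files registered below are optional: failures are only
 * logged and setup carries on.
 */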
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}