/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))
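
/*
 * Note (editorial, not from the original source): the struct device handed
 * to the sysfs callbacks below is the DRM card device (for example the one
 * backing /sys/class/drm/card0 - the card0 name is only illustrative), and
 * the DRM core is assumed to have stored the owning drm_minor in that
 * device's drvdata, which is what dev_to_drm_minor() recovers.
 */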

#ifdef CONFIG_PM
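/*
 * Residency conversion as implemented in calc_residency() below (worked
 * example, assuming the usual 1.28us tick of the GEN6+ RC6 residency
 * counters): with units = 128 and div = 100000, a raw count N yields
 * N * 128 / 100000 = N * 1.28us / 1000 milliseconds; e.g. a raw count of
 * 781250 gives 781250 * 128 / 100000 = 1000, i.e. one second in RC6.
 * On VLV/CHV the counter ticks at the CZ clock instead: units is derived
 * from the CZ count field (30ns reference, scaled by bias = 100) so that
 * raw_time ends up in ns * bias, div becomes 1000000 * bias to turn that
 * into milliseconds, and when the counter runs in the high range each tick
 * stands for 256 counts, hence the units <<= 8.
 */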
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 reg, czcount_30ns;

		if (IS_CHERRYVIEW(dev))
			reg = CHV_CLK_CTL1;
		else
			reg = VLV_CLK_CTL2;

		czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;

		if (!czcount_30ns) {
			WARN(!czcount_30ns, "bogus CZ count value");
			ret = 0;
			goto out;
		}

		units = 0;
		div = 1000000ULL;

		if (IS_CHERRYVIEW(dev)) {
			/* Special case for 320 MHz */
			if (czcount_30ns == 1) {
				div = 10000000ULL;
				units = 3125ULL;
			} else {
				/* CHV counts are one less */
				czcount_30ns += 1;
			}
		}

		if (units == 0)
			units = DIV_ROUND_UP_ULL(30ULL * bias,
						 (u64)czcount_30ns);

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = div * bias;
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};
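
/*
 * These groups are merged into the device's standard "power" directory by
 * i915_setup_sysfs(), so the counters end up as, for example (path assumes
 * the first card is card0 and is only illustrative):
 *
 *   cat /sys/class/drm/card0/power/rc6_residency_ms
 *
 * The value is the cumulative number of milliseconds spent in RC6 as
 * computed by calc_residency() above.
 */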
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
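
/*
 * Note on the write path above: the new remap values are only staged in
 * dev_priv->l3_parity.remap_info here. They are programmed into the
 * hardware the next time each context is switched in (that is what the
 * ctx->remap_slice bits request), which is why no register write happens
 * in i915_l3_write() itself.
 */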

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
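
/*
 * Illustrative userspace access (example only, assuming card0 and a
 * platform with L3 DPF): the per-slice remap tables are exposed as binary
 * sysfs files, so something like
 *
 *   dd if=/sys/class/drm/card0/l3_parity bs=4 count=1 2>/dev/null | xxd
 *
 * reads the first remap entry of slice 0, and writing 4-byte-aligned data
 * back stages a new remapping as described above.
 */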

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
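
/*
 * Frequency encoding note: on GEN6+ big-core parts the RPS values kept in
 * dev_priv->rps are opcode units that are multiplied by
 * GT_FREQUENCY_MULTIPLIER (50 MHz per step at the time this code was
 * written) to get MHz, while Valleyview/Cherryview frequencies are PUNIT
 * codes translated through vlv_gpu_freq()/vlv_freq_opcode(). All of the
 * *_mhz files in this block therefore report plain MHz on every platform.
 */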

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	else
		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	dev_priv->rps.max_freq_softlimit = val;

	if (dev_priv->rps.cur_freq > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new max_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
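
/*
 * Example of tuning the softlimits from userspace (paths assume card0 and
 * are illustrative only); values are in MHz and are validated against the
 * hardware range as checked above:
 *
 *   echo 900 > /sys/class/drm/card0/gt_max_freq_mhz
 *   echo 300 > /sys/class/drm/card0/gt_min_freq_mhz
 *
 * Writes outside [rps.min_freq, rps.max_freq], or that would invert the
 * min/max softlimit ordering, are rejected with -EINVAL.
 */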

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	else
		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev))
		val = vlv_freq_opcode(dev_priv, val);
	else
		val /= GT_FREQUENCY_MULTIPLIER;

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	if (dev_priv->rps.cur_freq < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	} else if (!IS_VALLEYVIEW(dev)) {
		/* We still need gen6_set_rps to process the new min_delay and
		 * update the interrupt limits even though frequency request is
		 * unchanged. */
		gen6_set_rps(dev, dev_priv->rps.cur_freq);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
		else
			val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
		else
			val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		if (IS_VALLEYVIEW(dev))
			val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
		else
			val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
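
/*
 * RP0/RP1/RPn are the hardware-reported render performance states: RP0 is
 * the maximum non-overclocked frequency, RP1 the "efficient" frequency and
 * RPn the minimum. On GEN6+ they come from successive byte fields of
 * GEN6_RP_STATE_CAP as decoded above; on VLV/CHV the cached rps values are
 * used instead.
 */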

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
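
/*
 * Illustrative use of the error node (path assumes card0 and is an example
 * only): after a GPU hang the captured state can be saved with
 *
 *   cat /sys/class/drm/card0/error > gpu-hang.txt
 *
 * and any write to the same file discards the captured state via
 * i915_destroy_error_state().
 */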

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}