/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
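/* Most gens count RC6 residency in 1.28us units (hence units = 128 with
 * div = 100000 to yield milliseconds); Valleyview instead ticks at the CZ
 * clock rate, whose period is derived from VLV_CLK_CTL2 below.
 */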
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;

	if (!intel_enable_rc6(dev))
		return 0;

	/* On VLV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clkctl2;

		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
			CLK_CTL2_CZCOUNT_30NS_SHIFT;
		if (WARN(!clkctl2, "bogus CZ count value"))
			return 0;
		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = 1000000ULL * bias;
	}

	raw_time = I915_READ(reg) * units;
	return DIV_ROUND_UP_ULL(raw_time, div);
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
#endif

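/* The l3_parity sysfs files expose the per-slice L3 remap log
 * (GEN7_L3LOG_SIZE bytes of u32 entries), so accesses must be 4-byte
 * aligned and fall within the log.
 */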
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct i915_hw_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

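/* The gt_*_freq_mhz files report and accept plain MHz. On most gens the
 * hardware delay values come in GT_FREQUENCY_MULTIPLIER-MHz steps; on
 * Valleyview they are PUnit opcodes translated via vlv_gpu_freq() and
 * vlv_freq_opcode().
 */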
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
	else
		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

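/* Writes to gt_max_freq_mhz are clamped to the hardware limits and must not
 * drop below the current minimum; if the new cap is below the current
 * frequency, the GPU is reclocked immediately.
 */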
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	dev_priv->rps.max_delay = val;

	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_delay = val;

	if (dev_priv->rps.cur_delay < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

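/* RP0, RP1 and RPn are the maximum, "most efficient" and minimum frequency
 * capabilities, packed into successive bytes of GEN6_RP_STATE_CAP.
 */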
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

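/* The "error" bin file dumps the most recently captured GPU error state as
 * text; writing anything to it discards the saved state.
 */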
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
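	/* Merge the rc6 counters into the device's standard "power" group */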
	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}