blob: e5f7e56b7c47694798a42cb10ed69c56ab33ece7 [file] [log] [blame]
Eugeni Dodonov85208be2012-04-16 22:20:34 -03001/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -030028#include <linux/cpufreq.h>
Eugeni Dodonov85208be2012-04-16 22:20:34 -030029#include "i915_drv.h"
30#include "intel_drv.h"
Daniel Vettereb48eb02012-04-26 23:28:12 +020031#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h>
Damien Lespiauf4db9322013-06-24 22:59:50 +010033#include <drm/i915_powerwell.h>
Eugeni Dodonov85208be2012-04-16 22:20:34 -030034
Eugeni Dodonovf6750b32012-04-18 11:51:14 -030035/* FBC, or Frame Buffer Compression, is a technique employed to compress the
36 * framebuffer contents in-memory, aiming at reducing the required bandwidth
37 * during in-memory transfers and, therefore, reduce the power packet.
Eugeni Dodonov85208be2012-04-16 22:20:34 -030038 *
Eugeni Dodonovf6750b32012-04-18 11:51:14 -030039 * The benefits of FBC are mostly visible with solid backgrounds and
40 * variation-less patterns.
Eugeni Dodonov85208be2012-04-16 22:20:34 -030041 *
Eugeni Dodonovf6750b32012-04-18 11:51:14 -030042 * FBC-related functionality can be enabled by the means of the
43 * i915.i915_enable_fbc parameter
Eugeni Dodonov85208be2012-04-16 22:20:34 -030044 */
45
Chris Wilson3490ea52013-01-07 10:11:40 +000046static bool intel_crtc_active(struct drm_crtc *crtc)
47{
48 /* Be paranoid as we can arrive here with only partial
49 * state retrieved from the hardware during setup.
50 */
51 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
52}
53
Eugeni Dodonov1fa61102012-04-18 15:29:26 -030054static void i8xx_disable_fbc(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -030055{
56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 fbc_ctl;
58
59 /* Disable compression */
60 fbc_ctl = I915_READ(FBC_CONTROL);
61 if ((fbc_ctl & FBC_CTL_EN) == 0)
62 return;
63
64 fbc_ctl &= ~FBC_CTL_EN;
65 I915_WRITE(FBC_CONTROL, fbc_ctl);
66
67 /* Wait for compressing bit to clear */
68 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
69 DRM_DEBUG_KMS("FBC idle timed out\n");
70 return;
71 }
72
73 DRM_DEBUG_KMS("disabled FBC\n");
74}
75
Eugeni Dodonov1fa61102012-04-18 15:29:26 -030076static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
Eugeni Dodonov85208be2012-04-16 22:20:34 -030077{
78 struct drm_device *dev = crtc->dev;
79 struct drm_i915_private *dev_priv = dev->dev_private;
80 struct drm_framebuffer *fb = crtc->fb;
81 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
82 struct drm_i915_gem_object *obj = intel_fb->obj;
83 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
84 int cfb_pitch;
85 int plane, i;
86 u32 fbc_ctl, fbc_ctl2;
87
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -070088 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
Eugeni Dodonov85208be2012-04-16 22:20:34 -030089 if (fb->pitches[0] < cfb_pitch)
90 cfb_pitch = fb->pitches[0];
91
92 /* FBC_CTL wants 64B units */
93 cfb_pitch = (cfb_pitch / 64) - 1;
94 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
95
96 /* Clear old tags */
97 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
98 I915_WRITE(FBC_TAG + (i * 4), 0);
99
100 /* Set it up... */
101 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
102 fbc_ctl2 |= plane;
103 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
104 I915_WRITE(FBC_FENCE_OFF, crtc->y);
105
106 /* enable it... */
107 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
108 if (IS_I945GM(dev))
109 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
110 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
111 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
112 fbc_ctl |= obj->fence_reg;
113 I915_WRITE(FBC_CONTROL, fbc_ctl);
114
Ville Syrjälä84f44ce2013-04-17 17:48:49 +0300115 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
116 cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300117}
118
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300119static bool i8xx_fbc_enabled(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300120{
121 struct drm_i915_private *dev_priv = dev->dev_private;
122
123 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
124}
125
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300126static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300127{
128 struct drm_device *dev = crtc->dev;
129 struct drm_i915_private *dev_priv = dev->dev_private;
130 struct drm_framebuffer *fb = crtc->fb;
131 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
132 struct drm_i915_gem_object *obj = intel_fb->obj;
133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
134 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
135 unsigned long stall_watermark = 200;
136 u32 dpfc_ctl;
137
138 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
139 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
140 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
141
142 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
143 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
144 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
145 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
146
147 /* enable it... */
148 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
149
Ville Syrjälä84f44ce2013-04-17 17:48:49 +0300150 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300151}
152
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300153static void g4x_disable_fbc(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300154{
155 struct drm_i915_private *dev_priv = dev->dev_private;
156 u32 dpfc_ctl;
157
158 /* Disable compression */
159 dpfc_ctl = I915_READ(DPFC_CONTROL);
160 if (dpfc_ctl & DPFC_CTL_EN) {
161 dpfc_ctl &= ~DPFC_CTL_EN;
162 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
163
164 DRM_DEBUG_KMS("disabled FBC\n");
165 }
166}
167
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300168static bool g4x_fbc_enabled(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300169{
170 struct drm_i915_private *dev_priv = dev->dev_private;
171
172 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
173}
174
175static void sandybridge_blit_fbc_update(struct drm_device *dev)
176{
177 struct drm_i915_private *dev_priv = dev->dev_private;
178 u32 blt_ecoskpd;
179
180 /* Make sure blitter notifies FBC of writes */
181 gen6_gt_force_wake_get(dev_priv);
182 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
183 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
184 GEN6_BLITTER_LOCK_SHIFT;
185 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
186 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
187 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
188 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
189 GEN6_BLITTER_LOCK_SHIFT);
190 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
191 POSTING_READ(GEN6_BLITTER_ECOSKPD);
192 gen6_gt_force_wake_put(dev_priv);
193}
194
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300195static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300196{
197 struct drm_device *dev = crtc->dev;
198 struct drm_i915_private *dev_priv = dev->dev_private;
199 struct drm_framebuffer *fb = crtc->fb;
200 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
201 struct drm_i915_gem_object *obj = intel_fb->obj;
202 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
203 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
204 unsigned long stall_watermark = 200;
205 u32 dpfc_ctl;
206
207 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
208 dpfc_ctl &= DPFC_RESERVED;
209 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
210 /* Set persistent mode for front-buffer rendering, ala X. */
211 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
212 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
213 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
214
215 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
216 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
217 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
218 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700219 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300220 /* enable it... */
221 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
222
223 if (IS_GEN6(dev)) {
224 I915_WRITE(SNB_DPFC_CTL_SA,
225 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
226 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
227 sandybridge_blit_fbc_update(dev);
228 }
229
Ville Syrjälä84f44ce2013-04-17 17:48:49 +0300230 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300231}
232
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300233static void ironlake_disable_fbc(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300234{
235 struct drm_i915_private *dev_priv = dev->dev_private;
236 u32 dpfc_ctl;
237
238 /* Disable compression */
239 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
240 if (dpfc_ctl & DPFC_CTL_EN) {
241 dpfc_ctl &= ~DPFC_CTL_EN;
242 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
243
Rodrigo Vivib74ea102013-05-09 14:08:38 -0300244 if (IS_IVYBRIDGE(dev))
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100245 /* WaFbcDisableDpfcClockGating:ivb */
Rodrigo Vivib74ea102013-05-09 14:08:38 -0300246 I915_WRITE(ILK_DSPCLK_GATE_D,
247 I915_READ(ILK_DSPCLK_GATE_D) &
248 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
249
Rodrigo Vivid89f2072013-05-09 14:20:50 -0300250 if (IS_HASWELL(dev))
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100251 /* WaFbcDisableDpfcClockGating:hsw */
Rodrigo Vivid89f2072013-05-09 14:20:50 -0300252 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
253 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
254 ~HSW_DPFC_GATING_DISABLE);
255
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300256 DRM_DEBUG_KMS("disabled FBC\n");
257 }
258}
259
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300260static bool ironlake_fbc_enabled(struct drm_device *dev)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300261{
262 struct drm_i915_private *dev_priv = dev->dev_private;
263
264 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
265}
266
Rodrigo Viviabe959c2013-05-06 19:37:33 -0300267static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
268{
269 struct drm_device *dev = crtc->dev;
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 struct drm_framebuffer *fb = crtc->fb;
272 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
273 struct drm_i915_gem_object *obj = intel_fb->obj;
274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
275
Ben Widawskyf343c5f2013-07-05 14:41:04 -0700276 I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
Rodrigo Viviabe959c2013-05-06 19:37:33 -0300277
278 I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
279 IVB_DPFC_CTL_FENCE_EN |
280 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
281
Rodrigo Vivi891348b2013-05-06 19:37:36 -0300282 if (IS_IVYBRIDGE(dev)) {
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100283 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
Rodrigo Vivi891348b2013-05-06 19:37:36 -0300284 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100285 /* WaFbcDisableDpfcClockGating:ivb */
Rodrigo Vivi891348b2013-05-06 19:37:36 -0300286 I915_WRITE(ILK_DSPCLK_GATE_D,
287 I915_READ(ILK_DSPCLK_GATE_D) |
288 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
Rodrigo Vivi28554162013-05-06 19:37:37 -0300289 } else {
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100290 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
Rodrigo Vivi28554162013-05-06 19:37:37 -0300291 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
292 HSW_BYPASS_FBC_QUEUE);
Damien Lespiau7dd23ba2013-05-10 14:33:17 +0100293 /* WaFbcDisableDpfcClockGating:hsw */
Rodrigo Vivid89f2072013-05-09 14:20:50 -0300294 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
295 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
296 HSW_DPFC_GATING_DISABLE);
Rodrigo Vivi891348b2013-05-06 19:37:36 -0300297 }
Rodrigo Vivib74ea102013-05-09 14:08:38 -0300298
Rodrigo Viviabe959c2013-05-06 19:37:33 -0300299 I915_WRITE(SNB_DPFC_CTL_SA,
300 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
301 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
302
303 sandybridge_blit_fbc_update(dev);
304
305 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
306}
307
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300308bool intel_fbc_enabled(struct drm_device *dev)
309{
310 struct drm_i915_private *dev_priv = dev->dev_private;
311
312 if (!dev_priv->display.fbc_enabled)
313 return false;
314
315 return dev_priv->display.fbc_enabled(dev);
316}
317
318static void intel_fbc_work_fn(struct work_struct *__work)
319{
320 struct intel_fbc_work *work =
321 container_of(to_delayed_work(__work),
322 struct intel_fbc_work, work);
323 struct drm_device *dev = work->crtc->dev;
324 struct drm_i915_private *dev_priv = dev->dev_private;
325
326 mutex_lock(&dev->struct_mutex);
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700327 if (work == dev_priv->fbc.fbc_work) {
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300328 /* Double check that we haven't switched fb without cancelling
329 * the prior work.
330 */
331 if (work->crtc->fb == work->fb) {
332 dev_priv->display.enable_fbc(work->crtc,
333 work->interval);
334
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700335 dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
336 dev_priv->fbc.fb_id = work->crtc->fb->base.id;
337 dev_priv->fbc.y = work->crtc->y;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300338 }
339
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700340 dev_priv->fbc.fbc_work = NULL;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300341 }
342 mutex_unlock(&dev->struct_mutex);
343
344 kfree(work);
345}
346
347static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
348{
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700349 if (dev_priv->fbc.fbc_work == NULL)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300350 return;
351
352 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
353
354 /* Synchronisation is provided by struct_mutex and checking of
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700355 * dev_priv->fbc.fbc_work, so we can perform the cancellation
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300356 * entirely asynchronously.
357 */
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700358 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300359 /* tasklet was killed before being run, clean up */
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700360 kfree(dev_priv->fbc.fbc_work);
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300361
362 /* Mark the work as no longer wanted so that if it does
363 * wake-up (because the work was already running and waiting
364 * for our mutex), it will discover that is no longer
365 * necessary to run.
366 */
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700367 dev_priv->fbc.fbc_work = NULL;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300368}
369
Damien Lespiaub63fb442013-06-24 16:22:01 +0100370static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300371{
372 struct intel_fbc_work *work;
373 struct drm_device *dev = crtc->dev;
374 struct drm_i915_private *dev_priv = dev->dev_private;
375
376 if (!dev_priv->display.enable_fbc)
377 return;
378
379 intel_cancel_fbc_work(dev_priv);
380
381 work = kzalloc(sizeof *work, GFP_KERNEL);
382 if (work == NULL) {
Paulo Zanoni6cdcb5e2013-06-12 17:27:29 -0300383 DRM_ERROR("Failed to allocate FBC work structure\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300384 dev_priv->display.enable_fbc(crtc, interval);
385 return;
386 }
387
388 work->crtc = crtc;
389 work->fb = crtc->fb;
390 work->interval = interval;
391 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
392
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700393 dev_priv->fbc.fbc_work = work;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300394
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300395 /* Delay the actual enabling to let pageflipping cease and the
396 * display to settle before starting the compression. Note that
397 * this delay also serves a second purpose: it allows for a
398 * vblank to pass after disabling the FBC before we attempt
399 * to modify the control registers.
400 *
401 * A more complicated solution would involve tracking vblanks
402 * following the termination of the page-flipping sequence
403 * and indeed performing the enable as a co-routine and not
404 * waiting synchronously upon the vblank.
Damien Lespiau7457d612013-06-07 17:41:07 +0100405 *
406 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300407 */
408 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
409}
410
411void intel_disable_fbc(struct drm_device *dev)
412{
413 struct drm_i915_private *dev_priv = dev->dev_private;
414
415 intel_cancel_fbc_work(dev_priv);
416
417 if (!dev_priv->display.disable_fbc)
418 return;
419
420 dev_priv->display.disable_fbc(dev);
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700421 dev_priv->fbc.plane = -1;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300422}
423
Chris Wilson29ebf902013-07-27 17:23:55 +0100424static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
425 enum no_fbc_reason reason)
426{
427 if (dev_priv->fbc.no_fbc_reason == reason)
428 return false;
429
430 dev_priv->fbc.no_fbc_reason = reason;
431 return true;
432}
433
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300434/**
435 * intel_update_fbc - enable/disable FBC as needed
436 * @dev: the drm_device
437 *
438 * Set up the framebuffer compression hardware at mode set time. We
439 * enable it if possible:
440 * - plane A only (on pre-965)
441 * - no pixel mulitply/line duplication
442 * - no alpha buffer discard
443 * - no dual wide
Paulo Zanonif85da862013-06-04 16:53:39 -0300444 * - framebuffer <= max_hdisplay in width, max_vdisplay in height
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300445 *
446 * We can't assume that any compression will take place (worst case),
447 * so the compressed buffer has to be the same size as the uncompressed
448 * one. It also must reside (along with the line length buffer) in
449 * stolen memory.
450 *
451 * We need to enable/disable FBC on a global basis.
452 */
453void intel_update_fbc(struct drm_device *dev)
454{
455 struct drm_i915_private *dev_priv = dev->dev_private;
456 struct drm_crtc *crtc = NULL, *tmp_crtc;
457 struct intel_crtc *intel_crtc;
458 struct drm_framebuffer *fb;
459 struct intel_framebuffer *intel_fb;
460 struct drm_i915_gem_object *obj;
Ville Syrjäläef644fd2013-09-04 18:25:21 +0300461 const struct drm_display_mode *mode;
462 const struct drm_display_mode *adjusted_mode;
Paulo Zanonif85da862013-06-04 16:53:39 -0300463 unsigned int max_hdisplay, max_vdisplay;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300464
Chris Wilson29ebf902013-07-27 17:23:55 +0100465 if (!I915_HAS_FBC(dev)) {
466 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300467 return;
Chris Wilson29ebf902013-07-27 17:23:55 +0100468 }
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300469
Chris Wilson29ebf902013-07-27 17:23:55 +0100470 if (!i915_powersave) {
471 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
472 DRM_DEBUG_KMS("fbc disabled per module param\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300473 return;
Chris Wilson29ebf902013-07-27 17:23:55 +0100474 }
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300475
476 /*
477 * If FBC is already on, we just have to verify that we can
478 * keep it that way...
479 * Need to disable if:
480 * - more than one pipe is active
481 * - changing FBC params (stride, fence, mode)
482 * - new fb is too large to fit in compressed buffer
483 * - going to an unsupported config (interlace, pixel multiply, etc.)
484 */
485 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
Chris Wilson3490ea52013-01-07 10:11:40 +0000486 if (intel_crtc_active(tmp_crtc) &&
487 !to_intel_crtc(tmp_crtc)->primary_disabled) {
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300488 if (crtc) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100489 if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
490 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300491 goto out_disable;
492 }
493 crtc = tmp_crtc;
494 }
495 }
496
497 if (!crtc || crtc->fb == NULL) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100498 if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
499 DRM_DEBUG_KMS("no output, disabling\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300500 goto out_disable;
501 }
502
503 intel_crtc = to_intel_crtc(crtc);
504 fb = crtc->fb;
505 intel_fb = to_intel_framebuffer(fb);
506 obj = intel_fb->obj;
Ville Syrjäläef644fd2013-09-04 18:25:21 +0300507 mode = &intel_crtc->config.requested_mode;
508 adjusted_mode = &intel_crtc->config.adjusted_mode;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300509
Damien Lespiau8a5729a2013-06-24 16:22:02 +0100510 if (i915_enable_fbc < 0 &&
511 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100512 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
513 DRM_DEBUG_KMS("disabled per chip default\n");
Damien Lespiau8a5729a2013-06-24 16:22:02 +0100514 goto out_disable;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300515 }
Damien Lespiau8a5729a2013-06-24 16:22:02 +0100516 if (!i915_enable_fbc) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100517 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
518 DRM_DEBUG_KMS("fbc disabled per module param\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300519 goto out_disable;
520 }
Ville Syrjäläef644fd2013-09-04 18:25:21 +0300521 if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
522 (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100523 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
524 DRM_DEBUG_KMS("mode incompatible with compression, "
525 "disabling\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300526 goto out_disable;
527 }
Paulo Zanonif85da862013-06-04 16:53:39 -0300528
529 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
530 max_hdisplay = 4096;
531 max_vdisplay = 2048;
532 } else {
533 max_hdisplay = 2048;
534 max_vdisplay = 1536;
535 }
Ville Syrjäläef644fd2013-09-04 18:25:21 +0300536 if ((mode->hdisplay > max_hdisplay) ||
537 (mode->vdisplay > max_vdisplay)) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100538 if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
539 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300540 goto out_disable;
541 }
Rodrigo Vivi891348b2013-05-06 19:37:36 -0300542 if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
543 intel_crtc->plane != 0) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100544 if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
545 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300546 goto out_disable;
547 }
548
549 /* The use of a CPU fence is mandatory in order to detect writes
550 * by the CPU to the scanout and trigger updates to the FBC.
551 */
552 if (obj->tiling_mode != I915_TILING_X ||
553 obj->fence_reg == I915_FENCE_REG_NONE) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100554 if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
555 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300556 goto out_disable;
557 }
558
559 /* If the kernel debugger is active, always disable compression */
560 if (in_dbg_master())
561 goto out_disable;
562
Chris Wilson11be49e2012-11-15 11:32:20 +0000563 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
Chris Wilson29ebf902013-07-27 17:23:55 +0100564 if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
565 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
Chris Wilson11be49e2012-11-15 11:32:20 +0000566 goto out_disable;
567 }
568
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300569 /* If the scanout has not changed, don't modify the FBC settings.
570 * Note that we make the fundamental assumption that the fb->obj
571 * cannot be unpinned (and have its GTT offset and fence revoked)
572 * without first being decoupled from the scanout and FBC disabled.
573 */
Ben Widawsky5c3fe8b2013-06-27 16:30:21 -0700574 if (dev_priv->fbc.plane == intel_crtc->plane &&
575 dev_priv->fbc.fb_id == fb->base.id &&
576 dev_priv->fbc.y == crtc->y)
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300577 return;
578
579 if (intel_fbc_enabled(dev)) {
580 /* We update FBC along two paths, after changing fb/crtc
581 * configuration (modeswitching) and after page-flipping
582 * finishes. For the latter, we know that not only did
583 * we disable the FBC at the start of the page-flip
584 * sequence, but also more than one vblank has passed.
585 *
586 * For the former case of modeswitching, it is possible
587 * to switch between two FBC valid configurations
588 * instantaneously so we do need to disable the FBC
589 * before we can modify its control registers. We also
590 * have to wait for the next vblank for that to take
591 * effect. However, since we delay enabling FBC we can
592 * assume that a vblank has passed since disabling and
593 * that we can safely alter the registers in the deferred
594 * callback.
595 *
596 * In the scenario that we go from a valid to invalid
597 * and then back to valid FBC configuration we have
598 * no strict enforcement that a vblank occurred since
599 * disabling the FBC. However, along all current pipe
600 * disabling paths we do need to wait for a vblank at
601 * some point. And we wait before enabling FBC anyway.
602 */
603 DRM_DEBUG_KMS("disabling active FBC for update\n");
604 intel_disable_fbc(dev);
605 }
606
607 intel_enable_fbc(crtc, 500);
Chris Wilson29ebf902013-07-27 17:23:55 +0100608 dev_priv->fbc.no_fbc_reason = FBC_OK;
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300609 return;
610
611out_disable:
612 /* Multiple disables should be harmless */
613 if (intel_fbc_enabled(dev)) {
614 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
615 intel_disable_fbc(dev);
616 }
Chris Wilson11be49e2012-11-15 11:32:20 +0000617 i915_gem_stolen_cleanup_compression(dev);
Eugeni Dodonov85208be2012-04-16 22:20:34 -0300618}
619
Daniel Vetterc921aba2012-04-26 23:28:17 +0200620static void i915_pineview_get_mem_freq(struct drm_device *dev)
621{
622 drm_i915_private_t *dev_priv = dev->dev_private;
623 u32 tmp;
624
625 tmp = I915_READ(CLKCFG);
626
627 switch (tmp & CLKCFG_FSB_MASK) {
628 case CLKCFG_FSB_533:
629 dev_priv->fsb_freq = 533; /* 133*4 */
630 break;
631 case CLKCFG_FSB_800:
632 dev_priv->fsb_freq = 800; /* 200*4 */
633 break;
634 case CLKCFG_FSB_667:
635 dev_priv->fsb_freq = 667; /* 167*4 */
636 break;
637 case CLKCFG_FSB_400:
638 dev_priv->fsb_freq = 400; /* 100*4 */
639 break;
640 }
641
642 switch (tmp & CLKCFG_MEM_MASK) {
643 case CLKCFG_MEM_533:
644 dev_priv->mem_freq = 533;
645 break;
646 case CLKCFG_MEM_667:
647 dev_priv->mem_freq = 667;
648 break;
649 case CLKCFG_MEM_800:
650 dev_priv->mem_freq = 800;
651 break;
652 }
653
654 /* detect pineview DDR3 setting */
655 tmp = I915_READ(CSHRDDR3CTL);
656 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
657}
658
659static void i915_ironlake_get_mem_freq(struct drm_device *dev)
660{
661 drm_i915_private_t *dev_priv = dev->dev_private;
662 u16 ddrpll, csipll;
663
664 ddrpll = I915_READ16(DDRMPLL1);
665 csipll = I915_READ16(CSIPLL0);
666
667 switch (ddrpll & 0xff) {
668 case 0xc:
669 dev_priv->mem_freq = 800;
670 break;
671 case 0x10:
672 dev_priv->mem_freq = 1066;
673 break;
674 case 0x14:
675 dev_priv->mem_freq = 1333;
676 break;
677 case 0x18:
678 dev_priv->mem_freq = 1600;
679 break;
680 default:
681 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
682 ddrpll & 0xff);
683 dev_priv->mem_freq = 0;
684 break;
685 }
686
Daniel Vetter20e4d402012-08-08 23:35:39 +0200687 dev_priv->ips.r_t = dev_priv->mem_freq;
Daniel Vetterc921aba2012-04-26 23:28:17 +0200688
689 switch (csipll & 0x3ff) {
690 case 0x00c:
691 dev_priv->fsb_freq = 3200;
692 break;
693 case 0x00e:
694 dev_priv->fsb_freq = 3733;
695 break;
696 case 0x010:
697 dev_priv->fsb_freq = 4266;
698 break;
699 case 0x012:
700 dev_priv->fsb_freq = 4800;
701 break;
702 case 0x014:
703 dev_priv->fsb_freq = 5333;
704 break;
705 case 0x016:
706 dev_priv->fsb_freq = 5866;
707 break;
708 case 0x018:
709 dev_priv->fsb_freq = 6400;
710 break;
711 default:
712 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
713 csipll & 0x3ff);
714 dev_priv->fsb_freq = 0;
715 break;
716 }
717
718 if (dev_priv->fsb_freq == 3200) {
Daniel Vetter20e4d402012-08-08 23:35:39 +0200719 dev_priv->ips.c_m = 0;
Daniel Vetterc921aba2012-04-26 23:28:17 +0200720 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
Daniel Vetter20e4d402012-08-08 23:35:39 +0200721 dev_priv->ips.c_m = 1;
Daniel Vetterc921aba2012-04-26 23:28:17 +0200722 } else {
Daniel Vetter20e4d402012-08-08 23:35:39 +0200723 dev_priv->ips.c_m = 2;
Daniel Vetterc921aba2012-04-26 23:28:17 +0200724 }
725}
726
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300727static const struct cxsr_latency cxsr_latency_table[] = {
728 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
729 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
730 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
731 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
732 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
733
734 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
735 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
736 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
737 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
738 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
739
740 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
741 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
742 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
743 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
744 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
745
746 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
747 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
748 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
749 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
750 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
751
752 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
753 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
754 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
755 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
756 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
757
758 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
759 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
760 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
761 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
762 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
763};
764
Daniel Vetter63c62272012-04-21 23:17:55 +0200765static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300766 int is_ddr3,
767 int fsb,
768 int mem)
769{
770 const struct cxsr_latency *latency;
771 int i;
772
773 if (fsb == 0 || mem == 0)
774 return NULL;
775
776 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
777 latency = &cxsr_latency_table[i];
778 if (is_desktop == latency->is_desktop &&
779 is_ddr3 == latency->is_ddr3 &&
780 fsb == latency->fsb_freq && mem == latency->mem_freq)
781 return latency;
782 }
783
784 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
785
786 return NULL;
787}
788
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300789static void pineview_disable_cxsr(struct drm_device *dev)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300790{
791 struct drm_i915_private *dev_priv = dev->dev_private;
792
793 /* deactivate cxsr */
794 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
795}
796
797/*
798 * Latency for FIFO fetches is dependent on several factors:
799 * - memory configuration (speed, channels)
800 * - chipset
801 * - current MCH state
802 * It can be fairly high in some situations, so here we assume a fairly
803 * pessimal value. It's a tradeoff between extra memory fetches (if we
804 * set this value too high, the FIFO will fetch frequently to stay full)
805 * and power consumption (set it too low to save power and we might see
806 * FIFO underruns and display "flicker").
807 *
808 * A value of 5us seems to be a good balance; safe for very low end
809 * platforms but not overly aggressive on lower latency configs.
810 */
811static const int latency_ns = 5000;
812
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300813static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300814{
815 struct drm_i915_private *dev_priv = dev->dev_private;
816 uint32_t dsparb = I915_READ(DSPARB);
817 int size;
818
819 size = dsparb & 0x7f;
820 if (plane)
821 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
822
823 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
824 plane ? "B" : "A", size);
825
826 return size;
827}
828
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300829static int i85x_get_fifo_size(struct drm_device *dev, int plane)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300830{
831 struct drm_i915_private *dev_priv = dev->dev_private;
832 uint32_t dsparb = I915_READ(DSPARB);
833 int size;
834
835 size = dsparb & 0x1ff;
836 if (plane)
837 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
838 size >>= 1; /* Convert to cachelines */
839
840 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
841 plane ? "B" : "A", size);
842
843 return size;
844}
845
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300846static int i845_get_fifo_size(struct drm_device *dev, int plane)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300847{
848 struct drm_i915_private *dev_priv = dev->dev_private;
849 uint32_t dsparb = I915_READ(DSPARB);
850 int size;
851
852 size = dsparb & 0x7f;
853 size >>= 2; /* Convert to cachelines */
854
855 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
856 plane ? "B" : "A",
857 size);
858
859 return size;
860}
861
Eugeni Dodonov1fa61102012-04-18 15:29:26 -0300862static int i830_get_fifo_size(struct drm_device *dev, int plane)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -0300863{
864 struct drm_i915_private *dev_priv = dev->dev_private;
865 uint32_t dsparb = I915_READ(DSPARB);
866 int size;
867
868 size = dsparb & 0x7f;
869 size >>= 1; /* Convert to cachelines */
870
871 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
872 plane ? "B" : "A", size);
873
874 return size;
875}
876
877/* Pineview has different values for various configs */
878static const struct intel_watermark_params pineview_display_wm = {
879 PINEVIEW_DISPLAY_FIFO,
880 PINEVIEW_MAX_WM,
881 PINEVIEW_DFT_WM,
882 PINEVIEW_GUARD_WM,
883 PINEVIEW_FIFO_LINE_SIZE
884};
885static const struct intel_watermark_params pineview_display_hplloff_wm = {
886 PINEVIEW_DISPLAY_FIFO,
887 PINEVIEW_MAX_WM,
888 PINEVIEW_DFT_HPLLOFF_WM,
889 PINEVIEW_GUARD_WM,
890 PINEVIEW_FIFO_LINE_SIZE
891};
892static const struct intel_watermark_params pineview_cursor_wm = {
893 PINEVIEW_CURSOR_FIFO,
894 PINEVIEW_CURSOR_MAX_WM,
895 PINEVIEW_CURSOR_DFT_WM,
896 PINEVIEW_CURSOR_GUARD_WM,
897 PINEVIEW_FIFO_LINE_SIZE,
898};
899static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
900 PINEVIEW_CURSOR_FIFO,
901 PINEVIEW_CURSOR_MAX_WM,
902 PINEVIEW_CURSOR_DFT_WM,
903 PINEVIEW_CURSOR_GUARD_WM,
904 PINEVIEW_FIFO_LINE_SIZE
905};
906static const struct intel_watermark_params g4x_wm_info = {
907 G4X_FIFO_SIZE,
908 G4X_MAX_WM,
909 G4X_MAX_WM,
910 2,
911 G4X_FIFO_LINE_SIZE,
912};
913static const struct intel_watermark_params g4x_cursor_wm_info = {
914 I965_CURSOR_FIFO,
915 I965_CURSOR_MAX_WM,
916 I965_CURSOR_DFT_WM,
917 2,
918 G4X_FIFO_LINE_SIZE,
919};
920static const struct intel_watermark_params valleyview_wm_info = {
921 VALLEYVIEW_FIFO_SIZE,
922 VALLEYVIEW_MAX_WM,
923 VALLEYVIEW_MAX_WM,
924 2,
925 G4X_FIFO_LINE_SIZE,
926};
927static const struct intel_watermark_params valleyview_cursor_wm_info = {
928 I965_CURSOR_FIFO,
929 VALLEYVIEW_CURSOR_MAX_WM,
930 I965_CURSOR_DFT_WM,
931 2,
932 G4X_FIFO_LINE_SIZE,
933};
934static const struct intel_watermark_params i965_cursor_wm_info = {
935 I965_CURSOR_FIFO,
936 I965_CURSOR_MAX_WM,
937 I965_CURSOR_DFT_WM,
938 2,
939 I915_FIFO_LINE_SIZE,
940};
941static const struct intel_watermark_params i945_wm_info = {
942 I945_FIFO_SIZE,
943 I915_MAX_WM,
944 1,
945 2,
946 I915_FIFO_LINE_SIZE
947};
948static const struct intel_watermark_params i915_wm_info = {
949 I915_FIFO_SIZE,
950 I915_MAX_WM,
951 1,
952 2,
953 I915_FIFO_LINE_SIZE
954};
955static const struct intel_watermark_params i855_wm_info = {
956 I855GM_FIFO_SIZE,
957 I915_MAX_WM,
958 1,
959 2,
960 I830_FIFO_LINE_SIZE
961};
962static const struct intel_watermark_params i830_wm_info = {
963 I830_FIFO_SIZE,
964 I915_MAX_WM,
965 1,
966 2,
967 I830_FIFO_LINE_SIZE
968};
969
970static const struct intel_watermark_params ironlake_display_wm_info = {
971 ILK_DISPLAY_FIFO,
972 ILK_DISPLAY_MAXWM,
973 ILK_DISPLAY_DFTWM,
974 2,
975 ILK_FIFO_LINE_SIZE
976};
977static const struct intel_watermark_params ironlake_cursor_wm_info = {
978 ILK_CURSOR_FIFO,
979 ILK_CURSOR_MAXWM,
980 ILK_CURSOR_DFTWM,
981 2,
982 ILK_FIFO_LINE_SIZE
983};
984static const struct intel_watermark_params ironlake_display_srwm_info = {
985 ILK_DISPLAY_SR_FIFO,
986 ILK_DISPLAY_MAX_SRWM,
987 ILK_DISPLAY_DFT_SRWM,
988 2,
989 ILK_FIFO_LINE_SIZE
990};
991static const struct intel_watermark_params ironlake_cursor_srwm_info = {
992 ILK_CURSOR_SR_FIFO,
993 ILK_CURSOR_MAX_SRWM,
994 ILK_CURSOR_DFT_SRWM,
995 2,
996 ILK_FIFO_LINE_SIZE
997};
998
999static const struct intel_watermark_params sandybridge_display_wm_info = {
1000 SNB_DISPLAY_FIFO,
1001 SNB_DISPLAY_MAXWM,
1002 SNB_DISPLAY_DFTWM,
1003 2,
1004 SNB_FIFO_LINE_SIZE
1005};
1006static const struct intel_watermark_params sandybridge_cursor_wm_info = {
1007 SNB_CURSOR_FIFO,
1008 SNB_CURSOR_MAXWM,
1009 SNB_CURSOR_DFTWM,
1010 2,
1011 SNB_FIFO_LINE_SIZE
1012};
1013static const struct intel_watermark_params sandybridge_display_srwm_info = {
1014 SNB_DISPLAY_SR_FIFO,
1015 SNB_DISPLAY_MAX_SRWM,
1016 SNB_DISPLAY_DFT_SRWM,
1017 2,
1018 SNB_FIFO_LINE_SIZE
1019};
1020static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1021 SNB_CURSOR_SR_FIFO,
1022 SNB_CURSOR_MAX_SRWM,
1023 SNB_CURSOR_DFT_SRWM,
1024 2,
1025 SNB_FIFO_LINE_SIZE
1026};
1027
1028
1029/**
1030 * intel_calculate_wm - calculate watermark level
1031 * @clock_in_khz: pixel clock
1032 * @wm: chip FIFO params
1033 * @pixel_size: display pixel size
1034 * @latency_ns: memory latency for the platform
1035 *
1036 * Calculate the watermark level (the level at which the display plane will
1037 * start fetching from memory again). Each chip has a different display
1038 * FIFO size and allocation, so the caller needs to figure that out and pass
1039 * in the correct intel_watermark_params structure.
1040 *
1041 * As the pixel clock runs, the FIFO will be drained at a rate that depends
1042 * on the pixel size. When it reaches the watermark level, it'll start
1043 * fetching FIFO line sized based chunks from memory until the FIFO fills
1044 * past the watermark point. If the FIFO drains completely, a FIFO underrun
1045 * will occur, and a display engine hang could result.
1046 */
1047static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1048 const struct intel_watermark_params *wm,
1049 int fifo_size,
1050 int pixel_size,
1051 unsigned long latency_ns)
1052{
1053 long entries_required, wm_size;
1054
1055 /*
1056 * Note: we need to make sure we don't overflow for various clock &
1057 * latency values.
1058 * clocks go from a few thousand to several hundred thousand.
1059 * latency is usually a few thousand
1060 */
1061 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1062 1000;
1063 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
1064
1065 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
1066
1067 wm_size = fifo_size - (entries_required + wm->guard_size);
1068
1069 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
1070
1071 /* Don't promote wm_size to unsigned... */
1072 if (wm_size > (long)wm->max_wm)
1073 wm_size = wm->max_wm;
1074 if (wm_size <= 0)
1075 wm_size = wm->default_wm;
1076 return wm_size;
1077}
1078
1079static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1080{
1081 struct drm_crtc *crtc, *enabled = NULL;
1082
1083 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Chris Wilson3490ea52013-01-07 10:11:40 +00001084 if (intel_crtc_active(crtc)) {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001085 if (enabled)
1086 return NULL;
1087 enabled = crtc;
1088 }
1089 }
1090
1091 return enabled;
1092}
1093
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001094static void pineview_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001095{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001096 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001097 struct drm_i915_private *dev_priv = dev->dev_private;
1098 struct drm_crtc *crtc;
1099 const struct cxsr_latency *latency;
1100 u32 reg;
1101 unsigned long wm;
1102
1103 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1104 dev_priv->fsb_freq, dev_priv->mem_freq);
1105 if (!latency) {
1106 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1107 pineview_disable_cxsr(dev);
1108 return;
1109 }
1110
1111 crtc = single_enabled_crtc(dev);
1112 if (crtc) {
1113 int clock = crtc->mode.clock;
1114 int pixel_size = crtc->fb->bits_per_pixel / 8;
1115
1116 /* Display SR */
1117 wm = intel_calculate_wm(clock, &pineview_display_wm,
1118 pineview_display_wm.fifo_size,
1119 pixel_size, latency->display_sr);
1120 reg = I915_READ(DSPFW1);
1121 reg &= ~DSPFW_SR_MASK;
1122 reg |= wm << DSPFW_SR_SHIFT;
1123 I915_WRITE(DSPFW1, reg);
1124 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1125
1126 /* cursor SR */
1127 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1128 pineview_display_wm.fifo_size,
1129 pixel_size, latency->cursor_sr);
1130 reg = I915_READ(DSPFW3);
1131 reg &= ~DSPFW_CURSOR_SR_MASK;
1132 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1133 I915_WRITE(DSPFW3, reg);
1134
1135 /* Display HPLL off SR */
1136 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1137 pineview_display_hplloff_wm.fifo_size,
1138 pixel_size, latency->display_hpll_disable);
1139 reg = I915_READ(DSPFW3);
1140 reg &= ~DSPFW_HPLL_SR_MASK;
1141 reg |= wm & DSPFW_HPLL_SR_MASK;
1142 I915_WRITE(DSPFW3, reg);
1143
1144 /* cursor HPLL off SR */
1145 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1146 pineview_display_hplloff_wm.fifo_size,
1147 pixel_size, latency->cursor_hpll_disable);
1148 reg = I915_READ(DSPFW3);
1149 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1150 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1151 I915_WRITE(DSPFW3, reg);
1152 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1153
1154 /* activate cxsr */
1155 I915_WRITE(DSPFW3,
1156 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1157 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1158 } else {
1159 pineview_disable_cxsr(dev);
1160 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1161 }
1162}
1163
1164static bool g4x_compute_wm0(struct drm_device *dev,
1165 int plane,
1166 const struct intel_watermark_params *display,
1167 int display_latency_ns,
1168 const struct intel_watermark_params *cursor,
1169 int cursor_latency_ns,
1170 int *plane_wm,
1171 int *cursor_wm)
1172{
1173 struct drm_crtc *crtc;
1174 int htotal, hdisplay, clock, pixel_size;
1175 int line_time_us, line_count;
1176 int entries, tlb_miss;
1177
1178 crtc = intel_get_crtc_for_plane(dev, plane);
Chris Wilson3490ea52013-01-07 10:11:40 +00001179 if (!intel_crtc_active(crtc)) {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001180 *cursor_wm = cursor->guard_size;
1181 *plane_wm = display->guard_size;
1182 return false;
1183 }
1184
1185 htotal = crtc->mode.htotal;
1186 hdisplay = crtc->mode.hdisplay;
1187 clock = crtc->mode.clock;
1188 pixel_size = crtc->fb->bits_per_pixel / 8;
1189
1190 /* Use the small buffer method to calculate plane watermark */
1191 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1192 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1193 if (tlb_miss > 0)
1194 entries += tlb_miss;
1195 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1196 *plane_wm = entries + display->guard_size;
1197 if (*plane_wm > (int)display->max_wm)
1198 *plane_wm = display->max_wm;
1199
1200 /* Use the large buffer method to calculate cursor watermark */
1201 line_time_us = ((htotal * 1000) / clock);
1202 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1203 entries = line_count * 64 * pixel_size;
1204 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1205 if (tlb_miss > 0)
1206 entries += tlb_miss;
1207 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1208 *cursor_wm = entries + cursor->guard_size;
1209 if (*cursor_wm > (int)cursor->max_wm)
1210 *cursor_wm = (int)cursor->max_wm;
1211
1212 return true;
1213}
1214
1215/*
1216 * Check the wm result.
1217 *
1218 * If any calculated watermark values is larger than the maximum value that
1219 * can be programmed into the associated watermark register, that watermark
1220 * must be disabled.
1221 */
1222static bool g4x_check_srwm(struct drm_device *dev,
1223 int display_wm, int cursor_wm,
1224 const struct intel_watermark_params *display,
1225 const struct intel_watermark_params *cursor)
1226{
1227 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1228 display_wm, cursor_wm);
1229
1230 if (display_wm > display->max_wm) {
1231 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1232 display_wm, display->max_wm);
1233 return false;
1234 }
1235
1236 if (cursor_wm > cursor->max_wm) {
1237 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1238 cursor_wm, cursor->max_wm);
1239 return false;
1240 }
1241
1242 if (!(display_wm || cursor_wm)) {
1243 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1244 return false;
1245 }
1246
1247 return true;
1248}
1249
1250static bool g4x_compute_srwm(struct drm_device *dev,
1251 int plane,
1252 int latency_ns,
1253 const struct intel_watermark_params *display,
1254 const struct intel_watermark_params *cursor,
1255 int *display_wm, int *cursor_wm)
1256{
1257 struct drm_crtc *crtc;
1258 int hdisplay, htotal, pixel_size, clock;
1259 unsigned long line_time_us;
1260 int line_count, line_size;
1261 int small, large;
1262 int entries;
1263
1264 if (!latency_ns) {
1265 *display_wm = *cursor_wm = 0;
1266 return false;
1267 }
1268
1269 crtc = intel_get_crtc_for_plane(dev, plane);
1270 hdisplay = crtc->mode.hdisplay;
1271 htotal = crtc->mode.htotal;
1272 clock = crtc->mode.clock;
1273 pixel_size = crtc->fb->bits_per_pixel / 8;
1274
1275 line_time_us = (htotal * 1000) / clock;
1276 line_count = (latency_ns / line_time_us + 1000) / 1000;
1277 line_size = hdisplay * pixel_size;
1278
1279 /* Use the minimum of the small and large buffer method for primary */
1280 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1281 large = line_count * line_size;
1282
1283 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1284 *display_wm = entries + display->guard_size;
1285
1286 /* calculate the self-refresh watermark for display cursor */
1287 entries = line_count * pixel_size * 64;
1288 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1289 *cursor_wm = entries + cursor->guard_size;
1290
1291 return g4x_check_srwm(dev,
1292 *display_wm, *cursor_wm,
1293 display, cursor);
1294}
1295
1296static bool vlv_compute_drain_latency(struct drm_device *dev,
1297 int plane,
1298 int *plane_prec_mult,
1299 int *plane_dl,
1300 int *cursor_prec_mult,
1301 int *cursor_dl)
1302{
1303 struct drm_crtc *crtc;
1304 int clock, pixel_size;
1305 int entries;
1306
1307 crtc = intel_get_crtc_for_plane(dev, plane);
Chris Wilson3490ea52013-01-07 10:11:40 +00001308 if (!intel_crtc_active(crtc))
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001309 return false;
1310
1311 clock = crtc->mode.clock; /* VESA DOT Clock */
1312 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1313
1314 entries = (clock / 1000) * pixel_size;
1315 *plane_prec_mult = (entries > 256) ?
1316 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1317 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1318 pixel_size);
1319
1320 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
1321 *cursor_prec_mult = (entries > 256) ?
1322 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1323 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1324
1325 return true;
1326}
1327
1328/*
1329 * Update drain latency registers of memory arbiter
1330 *
1331 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1332 * to be programmed. Each plane has a drain latency multiplier and a drain
1333 * latency value.
1334 */
1335
1336static void vlv_update_drain_latency(struct drm_device *dev)
1337{
1338 struct drm_i915_private *dev_priv = dev->dev_private;
1339 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1340 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1341 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1342 either 16 or 32 */
1343
1344 /* For plane A, Cursor A */
1345 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1346 &cursor_prec_mult, &cursora_dl)) {
1347 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1348 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1349 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1350 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1351
1352 I915_WRITE(VLV_DDL1, cursora_prec |
1353 (cursora_dl << DDL_CURSORA_SHIFT) |
1354 planea_prec | planea_dl);
1355 }
1356
1357 /* For plane B, Cursor B */
1358 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1359 &cursor_prec_mult, &cursorb_dl)) {
1360 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1361 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1362 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1363 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1364
1365 I915_WRITE(VLV_DDL2, cursorb_prec |
1366 (cursorb_dl << DDL_CURSORB_SHIFT) |
1367 planeb_prec | planeb_dl);
1368 }
1369}
1370
1371#define single_plane_enabled(mask) is_power_of_2(mask)
1372
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001373static void valleyview_update_wm(struct drm_crtc *crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001374{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001375 struct drm_device *dev = crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001376 static const int sr_latency_ns = 12000;
1377 struct drm_i915_private *dev_priv = dev->dev_private;
1378 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1379 int plane_sr, cursor_sr;
Chris Wilsonaf6c4572012-12-11 12:01:43 +00001380 int ignore_plane_sr, ignore_cursor_sr;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001381 unsigned int enabled = 0;
1382
1383 vlv_update_drain_latency(dev);
1384
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001385 if (g4x_compute_wm0(dev, PIPE_A,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001386 &valleyview_wm_info, latency_ns,
1387 &valleyview_cursor_wm_info, latency_ns,
1388 &planea_wm, &cursora_wm))
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001389 enabled |= 1 << PIPE_A;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001390
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001391 if (g4x_compute_wm0(dev, PIPE_B,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001392 &valleyview_wm_info, latency_ns,
1393 &valleyview_cursor_wm_info, latency_ns,
1394 &planeb_wm, &cursorb_wm))
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001395 enabled |= 1 << PIPE_B;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001396
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001397 if (single_plane_enabled(enabled) &&
1398 g4x_compute_srwm(dev, ffs(enabled) - 1,
1399 sr_latency_ns,
1400 &valleyview_wm_info,
1401 &valleyview_cursor_wm_info,
Chris Wilsonaf6c4572012-12-11 12:01:43 +00001402 &plane_sr, &ignore_cursor_sr) &&
1403 g4x_compute_srwm(dev, ffs(enabled) - 1,
1404 2*sr_latency_ns,
1405 &valleyview_wm_info,
1406 &valleyview_cursor_wm_info,
Chris Wilson52bd02d2012-12-07 10:43:24 +00001407 &ignore_plane_sr, &cursor_sr)) {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001408 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
Chris Wilson52bd02d2012-12-07 10:43:24 +00001409 } else {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001410 I915_WRITE(FW_BLC_SELF_VLV,
1411 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
Chris Wilson52bd02d2012-12-07 10:43:24 +00001412 plane_sr = cursor_sr = 0;
1413 }
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001414
1415 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1416 planea_wm, cursora_wm,
1417 planeb_wm, cursorb_wm,
1418 plane_sr, cursor_sr);
1419
1420 I915_WRITE(DSPFW1,
1421 (plane_sr << DSPFW_SR_SHIFT) |
1422 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1423 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1424 planea_wm);
1425 I915_WRITE(DSPFW2,
Chris Wilson8c919b22012-12-04 16:33:19 +00001426 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001427 (cursora_wm << DSPFW_CURSORA_SHIFT));
1428 I915_WRITE(DSPFW3,
Chris Wilson8c919b22012-12-04 16:33:19 +00001429 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1430 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001431}
1432
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001433static void g4x_update_wm(struct drm_crtc *crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001434{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001435 struct drm_device *dev = crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001436 static const int sr_latency_ns = 12000;
1437 struct drm_i915_private *dev_priv = dev->dev_private;
1438 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1439 int plane_sr, cursor_sr;
1440 unsigned int enabled = 0;
1441
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001442 if (g4x_compute_wm0(dev, PIPE_A,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001443 &g4x_wm_info, latency_ns,
1444 &g4x_cursor_wm_info, latency_ns,
1445 &planea_wm, &cursora_wm))
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001446 enabled |= 1 << PIPE_A;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001447
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001448 if (g4x_compute_wm0(dev, PIPE_B,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001449 &g4x_wm_info, latency_ns,
1450 &g4x_cursor_wm_info, latency_ns,
1451 &planeb_wm, &cursorb_wm))
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001452 enabled |= 1 << PIPE_B;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001453
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001454 if (single_plane_enabled(enabled) &&
1455 g4x_compute_srwm(dev, ffs(enabled) - 1,
1456 sr_latency_ns,
1457 &g4x_wm_info,
1458 &g4x_cursor_wm_info,
Chris Wilson52bd02d2012-12-07 10:43:24 +00001459 &plane_sr, &cursor_sr)) {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001460 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
Chris Wilson52bd02d2012-12-07 10:43:24 +00001461 } else {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001462 I915_WRITE(FW_BLC_SELF,
1463 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
Chris Wilson52bd02d2012-12-07 10:43:24 +00001464 plane_sr = cursor_sr = 0;
1465 }
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001466
1467 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1468 planea_wm, cursora_wm,
1469 planeb_wm, cursorb_wm,
1470 plane_sr, cursor_sr);
1471
1472 I915_WRITE(DSPFW1,
1473 (plane_sr << DSPFW_SR_SHIFT) |
1474 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1475 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1476 planea_wm);
1477 I915_WRITE(DSPFW2,
Chris Wilson8c919b22012-12-04 16:33:19 +00001478 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001479 (cursora_wm << DSPFW_CURSORA_SHIFT));
1480 /* HPLL off in SR has some issues on G4x... disable it */
1481 I915_WRITE(DSPFW3,
Chris Wilson8c919b22012-12-04 16:33:19 +00001482 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001483 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1484}
1485
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001486static void i965_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001487{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001488 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001489 struct drm_i915_private *dev_priv = dev->dev_private;
1490 struct drm_crtc *crtc;
1491 int srwm = 1;
1492 int cursor_sr = 16;
1493
1494 /* Calc sr entries for one plane configs */
1495 crtc = single_enabled_crtc(dev);
1496 if (crtc) {
1497 /* self-refresh has much higher latency */
1498 static const int sr_latency_ns = 12000;
1499 int clock = crtc->mode.clock;
1500 int htotal = crtc->mode.htotal;
1501 int hdisplay = crtc->mode.hdisplay;
1502 int pixel_size = crtc->fb->bits_per_pixel / 8;
1503 unsigned long line_time_us;
1504 int entries;
1505
1506 line_time_us = ((htotal * 1000) / clock);
1507
1508 /* Use ns/us then divide to preserve precision */
1509 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1510 pixel_size * hdisplay;
1511 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1512 srwm = I965_FIFO_SIZE - entries;
1513 if (srwm < 0)
1514 srwm = 1;
1515 srwm &= 0x1ff;
1516 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1517 entries, srwm);
1518
1519 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1520 pixel_size * 64;
1521 entries = DIV_ROUND_UP(entries,
1522 i965_cursor_wm_info.cacheline_size);
1523 cursor_sr = i965_cursor_wm_info.fifo_size -
1524 (entries + i965_cursor_wm_info.guard_size);
1525
1526 if (cursor_sr > i965_cursor_wm_info.max_wm)
1527 cursor_sr = i965_cursor_wm_info.max_wm;
1528
1529 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1530 "cursor %d\n", srwm, cursor_sr);
1531
1532 if (IS_CRESTLINE(dev))
1533 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1534 } else {
1535 /* Turn off self refresh if both pipes are enabled */
1536 if (IS_CRESTLINE(dev))
1537 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1538 & ~FW_BLC_SELF_EN);
1539 }
1540
1541 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1542 srwm);
1543
1544 /* 965 has limitations... */
1545 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1546 (8 << 16) | (8 << 8) | (8 << 0));
1547 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1548 /* update cursor SR watermark */
1549 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1550}
1551
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001552static void i9xx_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001553{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001554 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001555 struct drm_i915_private *dev_priv = dev->dev_private;
1556 const struct intel_watermark_params *wm_info;
1557 uint32_t fwater_lo;
1558 uint32_t fwater_hi;
1559 int cwm, srwm = 1;
1560 int fifo_size;
1561 int planea_wm, planeb_wm;
1562 struct drm_crtc *crtc, *enabled = NULL;
1563
1564 if (IS_I945GM(dev))
1565 wm_info = &i945_wm_info;
1566 else if (!IS_GEN2(dev))
1567 wm_info = &i915_wm_info;
1568 else
1569 wm_info = &i855_wm_info;
1570
1571 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1572 crtc = intel_get_crtc_for_plane(dev, 0);
Chris Wilson3490ea52013-01-07 10:11:40 +00001573 if (intel_crtc_active(crtc)) {
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001574 int cpp = crtc->fb->bits_per_pixel / 8;
1575 if (IS_GEN2(dev))
1576 cpp = 4;
1577
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001578 planea_wm = intel_calculate_wm(crtc->mode.clock,
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001579 wm_info, fifo_size, cpp,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001580 latency_ns);
1581 enabled = crtc;
1582 } else
1583 planea_wm = fifo_size - wm_info->guard_size;
1584
1585 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1586 crtc = intel_get_crtc_for_plane(dev, 1);
Chris Wilson3490ea52013-01-07 10:11:40 +00001587 if (intel_crtc_active(crtc)) {
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001588 int cpp = crtc->fb->bits_per_pixel / 8;
1589 if (IS_GEN2(dev))
1590 cpp = 4;
1591
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001592 planeb_wm = intel_calculate_wm(crtc->mode.clock,
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001593 wm_info, fifo_size, cpp,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001594 latency_ns);
1595 if (enabled == NULL)
1596 enabled = crtc;
1597 else
1598 enabled = NULL;
1599 } else
1600 planeb_wm = fifo_size - wm_info->guard_size;
1601
1602 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1603
1604 /*
1605 * Overlay gets an aggressive default since video jitter is bad.
1606 */
1607 cwm = 2;
1608
1609 /* Play safe and disable self-refresh before adjusting watermarks. */
1610 if (IS_I945G(dev) || IS_I945GM(dev))
1611 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1612 else if (IS_I915GM(dev))
1613 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1614
1615 /* Calc sr entries for one plane configs */
1616 if (HAS_FW_BLC(dev) && enabled) {
1617 /* self-refresh has much higher latency */
1618 static const int sr_latency_ns = 6000;
1619 int clock = enabled->mode.clock;
1620 int htotal = enabled->mode.htotal;
1621 int hdisplay = enabled->mode.hdisplay;
1622 int pixel_size = enabled->fb->bits_per_pixel / 8;
1623 unsigned long line_time_us;
1624 int entries;
1625
1626 line_time_us = (htotal * 1000) / clock;
1627
1628 /* Use ns/us then divide to preserve precision */
1629 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1630 pixel_size * hdisplay;
1631 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1632 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1633 srwm = wm_info->fifo_size - entries;
1634 if (srwm < 0)
1635 srwm = 1;
1636
1637 if (IS_I945G(dev) || IS_I945GM(dev))
1638 I915_WRITE(FW_BLC_SELF,
1639 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1640 else if (IS_I915GM(dev))
1641 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1642 }
1643
1644 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1645 planea_wm, planeb_wm, cwm, srwm);
1646
1647 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1648 fwater_hi = (cwm & 0x1f);
1649
1650 /* Set request length to 8 cachelines per fetch */
1651 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1652 fwater_hi = fwater_hi | (1 << 8);
1653
1654 I915_WRITE(FW_BLC, fwater_lo);
1655 I915_WRITE(FW_BLC2, fwater_hi);
1656
1657 if (HAS_FW_BLC(dev)) {
1658 if (enabled) {
1659 if (IS_I945G(dev) || IS_I945GM(dev))
1660 I915_WRITE(FW_BLC_SELF,
1661 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1662 else if (IS_I915GM(dev))
1663 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1664 DRM_DEBUG_KMS("memory self refresh enabled\n");
1665 } else
1666 DRM_DEBUG_KMS("memory self refresh disabled\n");
1667 }
1668}
1669
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001670static void i830_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001671{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001672 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001673 struct drm_i915_private *dev_priv = dev->dev_private;
1674 struct drm_crtc *crtc;
1675 uint32_t fwater_lo;
1676 int planea_wm;
1677
1678 crtc = single_enabled_crtc(dev);
1679 if (crtc == NULL)
1680 return;
1681
1682 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1683 dev_priv->display.get_fifo_size(dev, 0),
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001684 4, latency_ns);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001685 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1686 fwater_lo |= (3<<8) | planea_wm;
1687
1688 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1689
1690 I915_WRITE(FW_BLC, fwater_lo);
1691}
1692
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001693/*
1694 * Check the wm result.
1695 *
1696 * If any calculated watermark values is larger than the maximum value that
1697 * can be programmed into the associated watermark register, that watermark
1698 * must be disabled.
1699 */
1700static bool ironlake_check_srwm(struct drm_device *dev, int level,
1701 int fbc_wm, int display_wm, int cursor_wm,
1702 const struct intel_watermark_params *display,
1703 const struct intel_watermark_params *cursor)
1704{
1705 struct drm_i915_private *dev_priv = dev->dev_private;
1706
1707 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1708 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1709
1710 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1711 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1712 fbc_wm, SNB_FBC_MAX_SRWM, level);
1713
1714 /* fbc has it's own way to disable FBC WM */
1715 I915_WRITE(DISP_ARB_CTL,
1716 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1717 return false;
Ville Syrjälä615aaa52013-04-24 21:09:10 +03001718 } else if (INTEL_INFO(dev)->gen >= 6) {
1719 /* enable FBC WM (except on ILK, where it must remain off) */
1720 I915_WRITE(DISP_ARB_CTL,
1721 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001722 }
1723
1724 if (display_wm > display->max_wm) {
1725 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1726 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1727 return false;
1728 }
1729
1730 if (cursor_wm > cursor->max_wm) {
1731 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1732 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1733 return false;
1734 }
1735
1736 if (!(fbc_wm || display_wm || cursor_wm)) {
1737 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1738 return false;
1739 }
1740
1741 return true;
1742}
1743
1744/*
1745 * Compute watermark values of WM[1-3],
1746 */
1747static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1748 int latency_ns,
1749 const struct intel_watermark_params *display,
1750 const struct intel_watermark_params *cursor,
1751 int *fbc_wm, int *display_wm, int *cursor_wm)
1752{
1753 struct drm_crtc *crtc;
1754 unsigned long line_time_us;
1755 int hdisplay, htotal, pixel_size, clock;
1756 int line_count, line_size;
1757 int small, large;
1758 int entries;
1759
1760 if (!latency_ns) {
1761 *fbc_wm = *display_wm = *cursor_wm = 0;
1762 return false;
1763 }
1764
1765 crtc = intel_get_crtc_for_plane(dev, plane);
1766 hdisplay = crtc->mode.hdisplay;
1767 htotal = crtc->mode.htotal;
1768 clock = crtc->mode.clock;
1769 pixel_size = crtc->fb->bits_per_pixel / 8;
1770
1771 line_time_us = (htotal * 1000) / clock;
1772 line_count = (latency_ns / line_time_us + 1000) / 1000;
1773 line_size = hdisplay * pixel_size;
1774
1775 /* Use the minimum of the small and large buffer method for primary */
1776 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1777 large = line_count * line_size;
1778
1779 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1780 *display_wm = entries + display->guard_size;
1781
1782 /*
1783 * Spec says:
1784 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1785 */
1786 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1787
1788 /* calculate the self-refresh watermark for display cursor */
1789 entries = line_count * pixel_size * 64;
1790 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1791 *cursor_wm = entries + cursor->guard_size;
1792
1793 return ironlake_check_srwm(dev, level,
1794 *fbc_wm, *display_wm, *cursor_wm,
1795 display, cursor);
1796}
1797
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001798static void ironlake_update_wm(struct drm_crtc *crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001799{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001800 struct drm_device *dev = crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001801 struct drm_i915_private *dev_priv = dev->dev_private;
1802 int fbc_wm, plane_wm, cursor_wm;
1803 unsigned int enabled;
1804
1805 enabled = 0;
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001806 if (g4x_compute_wm0(dev, PIPE_A,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001807 &ironlake_display_wm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001808 dev_priv->wm.pri_latency[0] * 100,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001809 &ironlake_cursor_wm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001810 dev_priv->wm.cur_latency[0] * 100,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001811 &plane_wm, &cursor_wm)) {
1812 I915_WRITE(WM0_PIPEA_ILK,
1813 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1814 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1815 " plane %d, " "cursor: %d\n",
1816 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001817 enabled |= 1 << PIPE_A;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001818 }
1819
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001820 if (g4x_compute_wm0(dev, PIPE_B,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001821 &ironlake_display_wm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001822 dev_priv->wm.pri_latency[0] * 100,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001823 &ironlake_cursor_wm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001824 dev_priv->wm.cur_latency[0] * 100,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001825 &plane_wm, &cursor_wm)) {
1826 I915_WRITE(WM0_PIPEB_ILK,
1827 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1828 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1829 " plane %d, cursor: %d\n",
1830 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001831 enabled |= 1 << PIPE_B;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001832 }
1833
1834 /*
1835 * Calculate and update the self-refresh watermark only when one
1836 * display plane is used.
1837 */
1838 I915_WRITE(WM3_LP_ILK, 0);
1839 I915_WRITE(WM2_LP_ILK, 0);
1840 I915_WRITE(WM1_LP_ILK, 0);
1841
1842 if (!single_plane_enabled(enabled))
1843 return;
1844 enabled = ffs(enabled) - 1;
1845
1846 /* WM1 */
1847 if (!ironlake_compute_srwm(dev, 1, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001848 dev_priv->wm.pri_latency[1] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001849 &ironlake_display_srwm_info,
1850 &ironlake_cursor_srwm_info,
1851 &fbc_wm, &plane_wm, &cursor_wm))
1852 return;
1853
1854 I915_WRITE(WM1_LP_ILK,
1855 WM1_LP_SR_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001856 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001857 (fbc_wm << WM1_LP_FBC_SHIFT) |
1858 (plane_wm << WM1_LP_SR_SHIFT) |
1859 cursor_wm);
1860
1861 /* WM2 */
1862 if (!ironlake_compute_srwm(dev, 2, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001863 dev_priv->wm.pri_latency[2] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001864 &ironlake_display_srwm_info,
1865 &ironlake_cursor_srwm_info,
1866 &fbc_wm, &plane_wm, &cursor_wm))
1867 return;
1868
1869 I915_WRITE(WM2_LP_ILK,
1870 WM2_LP_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001871 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001872 (fbc_wm << WM1_LP_FBC_SHIFT) |
1873 (plane_wm << WM1_LP_SR_SHIFT) |
1874 cursor_wm);
1875
1876 /*
1877 * WM3 is unsupported on ILK, probably because we don't have latency
1878 * data for that power state
1879 */
1880}
1881
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001882static void sandybridge_update_wm(struct drm_crtc *crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001883{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001884 struct drm_device *dev = crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001885 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001886 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001887 u32 val;
1888 int fbc_wm, plane_wm, cursor_wm;
1889 unsigned int enabled;
1890
1891 enabled = 0;
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001892 if (g4x_compute_wm0(dev, PIPE_A,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001893 &sandybridge_display_wm_info, latency,
1894 &sandybridge_cursor_wm_info, latency,
1895 &plane_wm, &cursor_wm)) {
1896 val = I915_READ(WM0_PIPEA_ILK);
1897 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1898 I915_WRITE(WM0_PIPEA_ILK, val |
1899 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1900 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1901 " plane %d, " "cursor: %d\n",
1902 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001903 enabled |= 1 << PIPE_A;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001904 }
1905
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001906 if (g4x_compute_wm0(dev, PIPE_B,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001907 &sandybridge_display_wm_info, latency,
1908 &sandybridge_cursor_wm_info, latency,
1909 &plane_wm, &cursor_wm)) {
1910 val = I915_READ(WM0_PIPEB_ILK);
1911 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1912 I915_WRITE(WM0_PIPEB_ILK, val |
1913 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1914 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1915 " plane %d, cursor: %d\n",
1916 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001917 enabled |= 1 << PIPE_B;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001918 }
1919
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001920 /*
1921 * Calculate and update the self-refresh watermark only when one
1922 * display plane is used.
1923 *
1924 * SNB support 3 levels of watermark.
1925 *
1926 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
1927 * and disabled in the descending order
1928 *
1929 */
1930 I915_WRITE(WM3_LP_ILK, 0);
1931 I915_WRITE(WM2_LP_ILK, 0);
1932 I915_WRITE(WM1_LP_ILK, 0);
1933
1934 if (!single_plane_enabled(enabled) ||
1935 dev_priv->sprite_scaling_enabled)
1936 return;
1937 enabled = ffs(enabled) - 1;
1938
1939 /* WM1 */
1940 if (!ironlake_compute_srwm(dev, 1, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001941 dev_priv->wm.pri_latency[1] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001942 &sandybridge_display_srwm_info,
1943 &sandybridge_cursor_srwm_info,
1944 &fbc_wm, &plane_wm, &cursor_wm))
1945 return;
1946
1947 I915_WRITE(WM1_LP_ILK,
1948 WM1_LP_SR_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001949 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001950 (fbc_wm << WM1_LP_FBC_SHIFT) |
1951 (plane_wm << WM1_LP_SR_SHIFT) |
1952 cursor_wm);
1953
1954 /* WM2 */
1955 if (!ironlake_compute_srwm(dev, 2, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001956 dev_priv->wm.pri_latency[2] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001957 &sandybridge_display_srwm_info,
1958 &sandybridge_cursor_srwm_info,
1959 &fbc_wm, &plane_wm, &cursor_wm))
1960 return;
1961
1962 I915_WRITE(WM2_LP_ILK,
1963 WM2_LP_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001964 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001965 (fbc_wm << WM1_LP_FBC_SHIFT) |
1966 (plane_wm << WM1_LP_SR_SHIFT) |
1967 cursor_wm);
1968
1969 /* WM3 */
1970 if (!ironlake_compute_srwm(dev, 3, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001971 dev_priv->wm.pri_latency[3] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001972 &sandybridge_display_srwm_info,
1973 &sandybridge_cursor_srwm_info,
1974 &fbc_wm, &plane_wm, &cursor_wm))
1975 return;
1976
1977 I915_WRITE(WM3_LP_ILK,
1978 WM3_LP_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001979 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001980 (fbc_wm << WM1_LP_FBC_SHIFT) |
1981 (plane_wm << WM1_LP_SR_SHIFT) |
1982 cursor_wm);
1983}
1984
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001985static void ivybridge_update_wm(struct drm_crtc *crtc)
Chris Wilsonc43d0182012-12-11 12:01:42 +00001986{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001987 struct drm_device *dev = crtc->dev;
Chris Wilsonc43d0182012-12-11 12:01:42 +00001988 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03001989 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
Chris Wilsonc43d0182012-12-11 12:01:42 +00001990 u32 val;
1991 int fbc_wm, plane_wm, cursor_wm;
1992 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1993 unsigned int enabled;
1994
1995 enabled = 0;
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02001996 if (g4x_compute_wm0(dev, PIPE_A,
Chris Wilsonc43d0182012-12-11 12:01:42 +00001997 &sandybridge_display_wm_info, latency,
1998 &sandybridge_cursor_wm_info, latency,
1999 &plane_wm, &cursor_wm)) {
2000 val = I915_READ(WM0_PIPEA_ILK);
2001 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2002 I915_WRITE(WM0_PIPEA_ILK, val |
2003 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2004 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
2005 " plane %d, " "cursor: %d\n",
2006 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02002007 enabled |= 1 << PIPE_A;
Chris Wilsonc43d0182012-12-11 12:01:42 +00002008 }
2009
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02002010 if (g4x_compute_wm0(dev, PIPE_B,
Chris Wilsonc43d0182012-12-11 12:01:42 +00002011 &sandybridge_display_wm_info, latency,
2012 &sandybridge_cursor_wm_info, latency,
2013 &plane_wm, &cursor_wm)) {
2014 val = I915_READ(WM0_PIPEB_ILK);
2015 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2016 I915_WRITE(WM0_PIPEB_ILK, val |
2017 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2018 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
2019 " plane %d, cursor: %d\n",
2020 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02002021 enabled |= 1 << PIPE_B;
Chris Wilsonc43d0182012-12-11 12:01:42 +00002022 }
2023
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02002024 if (g4x_compute_wm0(dev, PIPE_C,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002025 &sandybridge_display_wm_info, latency,
2026 &sandybridge_cursor_wm_info, latency,
2027 &plane_wm, &cursor_wm)) {
2028 val = I915_READ(WM0_PIPEC_IVB);
2029 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2030 I915_WRITE(WM0_PIPEC_IVB, val |
2031 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2032 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2033 " plane %d, cursor: %d\n",
2034 plane_wm, cursor_wm);
Ville Syrjälä51cea1f2013-03-21 13:10:44 +02002035 enabled |= 1 << PIPE_C;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002036 }
2037
2038 /*
2039 * Calculate and update the self-refresh watermark only when one
2040 * display plane is used.
2041 *
2042 * SNB support 3 levels of watermark.
2043 *
2044 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
2045 * and disabled in the descending order
2046 *
2047 */
2048 I915_WRITE(WM3_LP_ILK, 0);
2049 I915_WRITE(WM2_LP_ILK, 0);
2050 I915_WRITE(WM1_LP_ILK, 0);
2051
2052 if (!single_plane_enabled(enabled) ||
2053 dev_priv->sprite_scaling_enabled)
2054 return;
2055 enabled = ffs(enabled) - 1;
2056
2057 /* WM1 */
2058 if (!ironlake_compute_srwm(dev, 1, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002059 dev_priv->wm.pri_latency[1] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002060 &sandybridge_display_srwm_info,
2061 &sandybridge_cursor_srwm_info,
2062 &fbc_wm, &plane_wm, &cursor_wm))
2063 return;
2064
2065 I915_WRITE(WM1_LP_ILK,
2066 WM1_LP_SR_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002067 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002068 (fbc_wm << WM1_LP_FBC_SHIFT) |
2069 (plane_wm << WM1_LP_SR_SHIFT) |
2070 cursor_wm);
2071
2072 /* WM2 */
2073 if (!ironlake_compute_srwm(dev, 2, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002074 dev_priv->wm.pri_latency[2] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002075 &sandybridge_display_srwm_info,
2076 &sandybridge_cursor_srwm_info,
2077 &fbc_wm, &plane_wm, &cursor_wm))
2078 return;
2079
2080 I915_WRITE(WM2_LP_ILK,
2081 WM2_LP_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002082 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002083 (fbc_wm << WM1_LP_FBC_SHIFT) |
2084 (plane_wm << WM1_LP_SR_SHIFT) |
2085 cursor_wm);
2086
Chris Wilsonc43d0182012-12-11 12:01:42 +00002087 /* WM3, note we have to correct the cursor latency */
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002088 if (!ironlake_compute_srwm(dev, 3, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002089 dev_priv->wm.pri_latency[3] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002090 &sandybridge_display_srwm_info,
2091 &sandybridge_cursor_srwm_info,
Chris Wilsonc43d0182012-12-11 12:01:42 +00002092 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2093 !ironlake_compute_srwm(dev, 3, enabled,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002094 dev_priv->wm.cur_latency[3] * 500,
Chris Wilsonc43d0182012-12-11 12:01:42 +00002095 &sandybridge_display_srwm_info,
2096 &sandybridge_cursor_srwm_info,
2097 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002098 return;
2099
2100 I915_WRITE(WM3_LP_ILK,
2101 WM3_LP_EN |
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002102 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002103 (fbc_wm << WM1_LP_FBC_SHIFT) |
2104 (plane_wm << WM1_LP_SR_SHIFT) |
2105 cursor_wm);
2106}
2107
Ville Syrjälä36587292013-07-05 11:57:16 +03002108static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2109 struct drm_crtc *crtc)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002110{
2111 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2112 uint32_t pixel_rate, pfit_size;
2113
Daniel Vetterff9a6752013-06-01 17:16:21 +02002114 pixel_rate = intel_crtc->config.adjusted_mode.clock;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002115
2116 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2117 * adjust the pixel_rate here. */
2118
2119 pfit_size = intel_crtc->config.pch_pfit.size;
2120 if (pfit_size) {
2121 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2122
2123 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2124 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2125 pfit_w = (pfit_size >> 16) & 0xFFFF;
2126 pfit_h = pfit_size & 0xFFFF;
2127 if (pipe_w < pfit_w)
2128 pipe_w = pfit_w;
2129 if (pipe_h < pfit_h)
2130 pipe_h = pfit_h;
2131
2132 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2133 pfit_w * pfit_h);
2134 }
2135
2136 return pixel_rate;
2137}
2138
Ville Syrjälä37126462013-08-01 16:18:55 +03002139/* latency must be in 0.1us units. */
Ville Syrjälä23297042013-07-05 11:57:17 +03002140static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002141 uint32_t latency)
2142{
2143 uint64_t ret;
2144
Ville Syrjälä3312ba62013-08-01 16:18:53 +03002145 if (WARN(latency == 0, "Latency value missing\n"))
2146 return UINT_MAX;
2147
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002148 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2149 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2150
2151 return ret;
2152}
2153
Ville Syrjälä37126462013-08-01 16:18:55 +03002154/* latency must be in 0.1us units. */
Ville Syrjälä23297042013-07-05 11:57:17 +03002155static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002156 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2157 uint32_t latency)
2158{
2159 uint32_t ret;
2160
Ville Syrjälä3312ba62013-08-01 16:18:53 +03002161 if (WARN(latency == 0, "Latency value missing\n"))
2162 return UINT_MAX;
2163
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002164 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2165 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2166 ret = DIV_ROUND_UP(ret, 64) + 2;
2167 return ret;
2168}
2169
Ville Syrjälä23297042013-07-05 11:57:17 +03002170static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002171 uint8_t bytes_per_pixel)
2172{
2173 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2174}
2175
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002176struct hsw_pipe_wm_parameters {
2177 bool active;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002178 uint32_t pipe_htotal;
2179 uint32_t pixel_rate;
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002180 struct intel_plane_wm_parameters pri;
2181 struct intel_plane_wm_parameters spr;
2182 struct intel_plane_wm_parameters cur;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002183};
2184
Paulo Zanonicca32e92013-05-31 11:45:06 -03002185struct hsw_wm_maximums {
2186 uint16_t pri;
2187 uint16_t spr;
2188 uint16_t cur;
2189 uint16_t fbc;
2190};
2191
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002192struct hsw_wm_values {
2193 uint32_t wm_pipe[3];
2194 uint32_t wm_lp[3];
2195 uint32_t wm_lp_spr[3];
2196 uint32_t wm_linetime[3];
Paulo Zanonicca32e92013-05-31 11:45:06 -03002197 bool enable_fbc_wm;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002198};
2199
Ville Syrjälä240264f2013-08-07 13:29:12 +03002200/* used in computing the new watermarks state */
2201struct intel_wm_config {
2202 unsigned int num_pipes_active;
2203 bool sprites_enabled;
2204 bool sprites_scaled;
2205 bool fbc_wm_enabled;
2206};
2207
Ville Syrjälä37126462013-08-01 16:18:55 +03002208/*
2209 * For both WM_PIPE and WM_LP.
2210 * mem_value must be in 0.1us units.
2211 */
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002212static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002213 uint32_t mem_value,
2214 bool is_lp)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002215{
Paulo Zanonicca32e92013-05-31 11:45:06 -03002216 uint32_t method1, method2;
2217
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002218 if (!params->active || !params->pri.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002219 return 0;
2220
Ville Syrjälä23297042013-07-05 11:57:17 +03002221 method1 = ilk_wm_method1(params->pixel_rate,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002222 params->pri.bytes_per_pixel,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002223 mem_value);
2224
2225 if (!is_lp)
2226 return method1;
2227
Ville Syrjälä23297042013-07-05 11:57:17 +03002228 method2 = ilk_wm_method2(params->pixel_rate,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002229 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002230 params->pri.horiz_pixels,
2231 params->pri.bytes_per_pixel,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002232 mem_value);
2233
2234 return min(method1, method2);
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002235}
2236
Ville Syrjälä37126462013-08-01 16:18:55 +03002237/*
2238 * For both WM_PIPE and WM_LP.
2239 * mem_value must be in 0.1us units.
2240 */
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002241static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002242 uint32_t mem_value)
2243{
2244 uint32_t method1, method2;
2245
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002246 if (!params->active || !params->spr.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002247 return 0;
2248
Ville Syrjälä23297042013-07-05 11:57:17 +03002249 method1 = ilk_wm_method1(params->pixel_rate,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002250 params->spr.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002251 mem_value);
Ville Syrjälä23297042013-07-05 11:57:17 +03002252 method2 = ilk_wm_method2(params->pixel_rate,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002253 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002254 params->spr.horiz_pixels,
2255 params->spr.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002256 mem_value);
2257 return min(method1, method2);
2258}
2259
Ville Syrjälä37126462013-08-01 16:18:55 +03002260/*
2261 * For both WM_PIPE and WM_LP.
2262 * mem_value must be in 0.1us units.
2263 */
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002264static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002265 uint32_t mem_value)
2266{
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002267 if (!params->active || !params->cur.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002268 return 0;
2269
Ville Syrjälä23297042013-07-05 11:57:17 +03002270 return ilk_wm_method2(params->pixel_rate,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002271 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002272 params->cur.horiz_pixels,
2273 params->cur.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002274 mem_value);
2275}
2276
Paulo Zanonicca32e92013-05-31 11:45:06 -03002277/* Only for WM_LP. */
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002278static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
Ville Syrjälä1fda9882013-07-05 11:57:19 +03002279 uint32_t pri_val)
Paulo Zanonicca32e92013-05-31 11:45:06 -03002280{
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002281 if (!params->active || !params->pri.enabled)
Paulo Zanonicca32e92013-05-31 11:45:06 -03002282 return 0;
2283
Ville Syrjälä23297042013-07-05 11:57:17 +03002284 return ilk_wm_fbc(pri_val,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002285 params->pri.horiz_pixels,
2286 params->pri.bytes_per_pixel);
Paulo Zanonicca32e92013-05-31 11:45:06 -03002287}
2288
Ville Syrjälä158ae642013-08-07 13:28:19 +03002289static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2290{
2291 if (INTEL_INFO(dev)->gen >= 7)
2292 return 768;
2293 else
2294 return 512;
2295}
2296
2297/* Calculate the maximum primary/sprite plane watermark */
2298static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2299 int level,
Ville Syrjälä240264f2013-08-07 13:29:12 +03002300 const struct intel_wm_config *config,
Ville Syrjälä158ae642013-08-07 13:28:19 +03002301 enum intel_ddb_partitioning ddb_partitioning,
2302 bool is_sprite)
2303{
2304 unsigned int fifo_size = ilk_display_fifo_size(dev);
2305 unsigned int max;
2306
2307 /* if sprites aren't enabled, sprites get nothing */
Ville Syrjälä240264f2013-08-07 13:29:12 +03002308 if (is_sprite && !config->sprites_enabled)
Ville Syrjälä158ae642013-08-07 13:28:19 +03002309 return 0;
2310
2311 /* HSW allows LP1+ watermarks even with multiple pipes */
Ville Syrjälä240264f2013-08-07 13:29:12 +03002312 if (level == 0 || config->num_pipes_active > 1) {
Ville Syrjälä158ae642013-08-07 13:28:19 +03002313 fifo_size /= INTEL_INFO(dev)->num_pipes;
2314
2315 /*
2316 * For some reason the non self refresh
2317 * FIFO size is only half of the self
2318 * refresh FIFO size on ILK/SNB.
2319 */
2320 if (INTEL_INFO(dev)->gen <= 6)
2321 fifo_size /= 2;
2322 }
2323
Ville Syrjälä240264f2013-08-07 13:29:12 +03002324 if (config->sprites_enabled) {
Ville Syrjälä158ae642013-08-07 13:28:19 +03002325 /* level 0 is always calculated with 1:1 split */
2326 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2327 if (is_sprite)
2328 fifo_size *= 5;
2329 fifo_size /= 6;
2330 } else {
2331 fifo_size /= 2;
2332 }
2333 }
2334
2335 /* clamp to max that the registers can hold */
2336 if (INTEL_INFO(dev)->gen >= 7)
2337 /* IVB/HSW primary/sprite plane watermarks */
2338 max = level == 0 ? 127 : 1023;
2339 else if (!is_sprite)
2340 /* ILK/SNB primary plane watermarks */
2341 max = level == 0 ? 127 : 511;
2342 else
2343 /* ILK/SNB sprite plane watermarks */
2344 max = level == 0 ? 63 : 255;
2345
2346 return min(fifo_size, max);
2347}
2348
2349/* Calculate the maximum cursor plane watermark */
2350static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
Ville Syrjälä240264f2013-08-07 13:29:12 +03002351 int level,
2352 const struct intel_wm_config *config)
Ville Syrjälä158ae642013-08-07 13:28:19 +03002353{
2354 /* HSW LP1+ watermarks w/ multiple pipes */
Ville Syrjälä240264f2013-08-07 13:29:12 +03002355 if (level > 0 && config->num_pipes_active > 1)
Ville Syrjälä158ae642013-08-07 13:28:19 +03002356 return 64;
2357
2358 /* otherwise just report max that registers can hold */
2359 if (INTEL_INFO(dev)->gen >= 7)
2360 return level == 0 ? 63 : 255;
2361 else
2362 return level == 0 ? 31 : 63;
2363}
2364
2365/* Calculate the maximum FBC watermark */
2366static unsigned int ilk_fbc_wm_max(void)
2367{
2368 /* max that registers can hold */
2369 return 15;
2370}
2371
2372static void ilk_wm_max(struct drm_device *dev,
2373 int level,
Ville Syrjälä240264f2013-08-07 13:29:12 +03002374 const struct intel_wm_config *config,
Ville Syrjälä158ae642013-08-07 13:28:19 +03002375 enum intel_ddb_partitioning ddb_partitioning,
2376 struct hsw_wm_maximums *max)
2377{
Ville Syrjälä240264f2013-08-07 13:29:12 +03002378 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2379 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2380 max->cur = ilk_cursor_wm_max(dev, level, config);
Ville Syrjälä158ae642013-08-07 13:28:19 +03002381 max->fbc = ilk_fbc_wm_max();
2382}
2383
Ville Syrjäläa9786a12013-08-07 13:24:47 +03002384static bool ilk_check_wm(int level,
2385 const struct hsw_wm_maximums *max,
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002386 struct intel_wm_level *result)
Ville Syrjäläa9786a12013-08-07 13:24:47 +03002387{
2388 bool ret;
2389
2390 /* already determined to be invalid? */
2391 if (!result->enable)
2392 return false;
2393
2394 result->enable = result->pri_val <= max->pri &&
2395 result->spr_val <= max->spr &&
2396 result->cur_val <= max->cur;
2397
2398 ret = result->enable;
2399
2400 /*
2401 * HACK until we can pre-compute everything,
2402 * and thus fail gracefully if LP0 watermarks
2403 * are exceeded...
2404 */
2405 if (level == 0 && !result->enable) {
2406 if (result->pri_val > max->pri)
2407 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2408 level, result->pri_val, max->pri);
2409 if (result->spr_val > max->spr)
2410 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2411 level, result->spr_val, max->spr);
2412 if (result->cur_val > max->cur)
2413 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2414 level, result->cur_val, max->cur);
2415
2416 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2417 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2418 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2419 result->enable = true;
2420 }
2421
2422 DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
2423
2424 return ret;
2425}
2426
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002427static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2428 int level,
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002429 const struct hsw_pipe_wm_parameters *p,
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002430 struct intel_wm_level *result)
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002431{
2432 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2433 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2434 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2435
2436 /* WM1+ latency values stored in 0.5us units */
2437 if (level > 0) {
2438 pri_latency *= 5;
2439 spr_latency *= 5;
2440 cur_latency *= 5;
2441 }
2442
2443 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2444 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2445 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2446 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2447 result->enable = true;
2448}
2449
Ville Syrjälä5b77da32013-08-01 16:18:51 +03002450static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002451 int level, const struct hsw_wm_maximums *max,
2452 const struct hsw_pipe_wm_parameters *params,
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002453 struct intel_wm_level *result)
Paulo Zanonicca32e92013-05-31 11:45:06 -03002454{
2455 enum pipe pipe;
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002456 struct intel_wm_level res[3];
Paulo Zanonicca32e92013-05-31 11:45:06 -03002457
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002458 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
2459 ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
Paulo Zanonicca32e92013-05-31 11:45:06 -03002460
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002461 result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
2462 result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
2463 result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
2464 result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
2465 result->enable = true;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002466
Ville Syrjäläa9786a12013-08-07 13:24:47 +03002467 return ilk_check_wm(level, max, result);
Paulo Zanonicca32e92013-05-31 11:45:06 -03002468}
2469
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002470
2471static uint32_t hsw_compute_wm_pipe(struct drm_device *dev,
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002472 const struct hsw_pipe_wm_parameters *params)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002473{
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002474 struct drm_i915_private *dev_priv = dev->dev_private;
2475 struct intel_wm_config config = {
2476 .num_pipes_active = 1,
2477 .sprites_enabled = params->spr.enabled,
2478 .sprites_scaled = params->spr.scaled,
2479 };
2480 struct hsw_wm_maximums max;
2481 struct intel_wm_level res;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002482
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002483 if (!params->active)
2484 return 0;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002485
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002486 ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002487
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002488 ilk_compute_wm_level(dev_priv, 0, params, &res);
2489
2490 ilk_check_wm(0, &max, &res);
2491
2492 return (res.pri_val << WM0_PIPE_PLANE_SHIFT) |
2493 (res.spr_val << WM0_PIPE_SPRITE_SHIFT) |
2494 res.cur_val;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002495}
2496
2497static uint32_t
2498hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002499{
2500 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002501 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002502 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
Paulo Zanoni85a02de2013-05-03 17:23:43 -03002503 u32 linetime, ips_linetime;
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002504
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002505 if (!intel_crtc_active(crtc))
2506 return 0;
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002507
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002508 /* The WM are computed with base on how long it takes to fill a single
2509 * row at the given clock rate, multiplied by 8.
2510 * */
Paulo Zanoni85a02de2013-05-03 17:23:43 -03002511 linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2512 ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2513 intel_ddi_get_cdclk_freq(dev_priv));
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002514
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002515 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2516 PIPE_WM_LINETIME_TIME(linetime);
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002517}
2518
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002519static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2520{
2521 struct drm_i915_private *dev_priv = dev->dev_private;
2522
2523 if (IS_HASWELL(dev)) {
2524 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2525
2526 wm[0] = (sskpd >> 56) & 0xFF;
2527 if (wm[0] == 0)
2528 wm[0] = sskpd & 0xF;
Ville Syrjäläe5d50192013-07-05 11:57:22 +03002529 wm[1] = (sskpd >> 4) & 0xFF;
2530 wm[2] = (sskpd >> 12) & 0xFF;
2531 wm[3] = (sskpd >> 20) & 0x1FF;
2532 wm[4] = (sskpd >> 32) & 0x1FF;
Ville Syrjälä63cf9a12013-07-05 11:57:23 +03002533 } else if (INTEL_INFO(dev)->gen >= 6) {
2534 uint32_t sskpd = I915_READ(MCH_SSKPD);
2535
2536 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2537 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2538 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2539 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
Ville Syrjälä3a88d0a2013-08-01 16:18:49 +03002540 } else if (INTEL_INFO(dev)->gen >= 5) {
2541 uint32_t mltr = I915_READ(MLTR_ILK);
2542
2543 /* ILK primary LP0 latency is 700 ns */
2544 wm[0] = 7;
2545 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2546 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002547 }
2548}
2549
Ville Syrjälä53615a52013-08-01 16:18:50 +03002550static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2551{
2552 /* ILK sprite LP0 latency is 1300 ns */
2553 if (INTEL_INFO(dev)->gen == 5)
2554 wm[0] = 13;
2555}
2556
2557static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2558{
2559 /* ILK cursor LP0 latency is 1300 ns */
2560 if (INTEL_INFO(dev)->gen == 5)
2561 wm[0] = 13;
2562
2563 /* WaDoubleCursorLP3Latency:ivb */
2564 if (IS_IVYBRIDGE(dev))
2565 wm[3] *= 2;
2566}
2567
Ville Syrjäläad0d6dc2013-08-30 14:30:25 +03002568static int ilk_wm_max_level(const struct drm_device *dev)
2569{
2570 /* how many WM levels are we expecting */
2571 if (IS_HASWELL(dev))
2572 return 4;
2573 else if (INTEL_INFO(dev)->gen >= 6)
2574 return 3;
2575 else
2576 return 2;
2577}
2578
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002579static void intel_print_wm_latency(struct drm_device *dev,
2580 const char *name,
2581 const uint16_t wm[5])
2582{
Ville Syrjäläad0d6dc2013-08-30 14:30:25 +03002583 int level, max_level = ilk_wm_max_level(dev);
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002584
2585 for (level = 0; level <= max_level; level++) {
2586 unsigned int latency = wm[level];
2587
2588 if (latency == 0) {
2589 DRM_ERROR("%s WM%d latency not provided\n",
2590 name, level);
2591 continue;
2592 }
2593
2594 /* WM1+ latency values in 0.5us units */
2595 if (level > 0)
2596 latency *= 5;
2597
2598 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2599 name, level, wm[level],
2600 latency / 10, latency % 10);
2601 }
2602}
2603
Ville Syrjälä53615a52013-08-01 16:18:50 +03002604static void intel_setup_wm_latency(struct drm_device *dev)
2605{
2606 struct drm_i915_private *dev_priv = dev->dev_private;
2607
2608 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2609
2610 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2611 sizeof(dev_priv->wm.pri_latency));
2612 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2613 sizeof(dev_priv->wm.pri_latency));
2614
2615 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2616 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002617
2618 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2619 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2620 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
Ville Syrjälä53615a52013-08-01 16:18:50 +03002621}
2622
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002623static void hsw_compute_wm_parameters(struct drm_device *dev,
2624 struct hsw_pipe_wm_parameters *params,
Paulo Zanoni861f3382013-05-31 10:19:21 -03002625 struct hsw_wm_maximums *lp_max_1_2,
2626 struct hsw_wm_maximums *lp_max_5_6)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002627{
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002628 struct drm_crtc *crtc;
2629 struct drm_plane *plane;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002630 enum pipe pipe;
Ville Syrjälä240264f2013-08-07 13:29:12 +03002631 struct intel_wm_config config = {};
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002632
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002633 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2634 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2635 struct hsw_pipe_wm_parameters *p;
2636
2637 pipe = intel_crtc->pipe;
2638 p = &params[pipe];
2639
2640 p->active = intel_crtc_active(crtc);
2641 if (!p->active)
2642 continue;
2643
Ville Syrjälä240264f2013-08-07 13:29:12 +03002644 config.num_pipes_active++;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002645
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002646 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
Ville Syrjälä36587292013-07-05 11:57:16 +03002647 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002648 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2649 p->cur.bytes_per_pixel = 4;
2650 p->pri.horiz_pixels =
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002651 intel_crtc->config.requested_mode.hdisplay;
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002652 p->cur.horiz_pixels = 64;
2653 /* TODO: for now, assume primary and cursor planes are always enabled. */
2654 p->pri.enabled = true;
2655 p->cur.enabled = true;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002656 }
2657
2658 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2659 struct intel_plane *intel_plane = to_intel_plane(plane);
2660 struct hsw_pipe_wm_parameters *p;
2661
2662 pipe = intel_plane->pipe;
2663 p = &params[pipe];
2664
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002665 p->spr = intel_plane->wm;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002666
Ville Syrjäläc35426d2013-08-07 13:29:50 +03002667 config.sprites_enabled |= p->spr.enabled;
2668 config.sprites_scaled |= p->spr.scaled;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002669 }
Paulo Zanonicca32e92013-05-31 11:45:06 -03002670
Ville Syrjälä240264f2013-08-07 13:29:12 +03002671 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
Ville Syrjälä158ae642013-08-07 13:28:19 +03002672
2673 /* 5/6 split only in single pipe config on IVB+ */
Ville Syrjälä240264f2013-08-07 13:29:12 +03002674 if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
2675 ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
Ville Syrjälä158ae642013-08-07 13:28:19 +03002676 else
2677 *lp_max_5_6 = *lp_max_1_2;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002678}
2679
2680static void hsw_compute_wm_results(struct drm_device *dev,
Ville Syrjäläac830fe2013-08-30 14:30:23 +03002681 const struct hsw_pipe_wm_parameters *params,
2682 const struct hsw_wm_maximums *lp_maximums,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002683 struct hsw_wm_values *results)
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002684{
2685 struct drm_i915_private *dev_priv = dev->dev_private;
2686 struct drm_crtc *crtc;
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002687 struct intel_wm_level lp_results[4] = {};
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002688 enum pipe pipe;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002689 int level, max_level, wm_lp;
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002690
Paulo Zanonicca32e92013-05-31 11:45:06 -03002691 for (level = 1; level <= 4; level++)
Ville Syrjälä5b77da32013-08-01 16:18:51 +03002692 if (!hsw_compute_lp_wm(dev_priv, level,
2693 lp_maximums, params,
Paulo Zanonicca32e92013-05-31 11:45:06 -03002694 &lp_results[level - 1]))
2695 break;
2696 max_level = level - 1;
2697
Ville Syrjälä5c536612013-08-09 18:02:09 +03002698 memset(results, 0, sizeof(*results));
2699
Paulo Zanonicca32e92013-05-31 11:45:06 -03002700 /* The spec says it is preferred to disable FBC WMs instead of disabling
2701 * a WM level. */
2702 results->enable_fbc_wm = true;
2703 for (level = 1; level <= max_level; level++) {
Dan Carpenter16e54062013-08-09 13:07:31 +03002704 if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
Paulo Zanonicca32e92013-05-31 11:45:06 -03002705 results->enable_fbc_wm = false;
Ville Syrjälä71fff202013-08-06 22:24:03 +03002706 lp_results[level - 1].fbc_val = 0;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002707 }
2708 }
2709
Paulo Zanonicca32e92013-05-31 11:45:06 -03002710 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002711 const struct intel_wm_level *r;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002712
2713 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2714 if (level > max_level)
2715 break;
2716
2717 r = &lp_results[level - 1];
2718 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2719 r->fbc_val,
2720 r->pri_val,
2721 r->cur_val);
2722 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2723 }
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002724
2725 for_each_pipe(pipe)
Ville Syrjälä8de123a2013-08-30 14:30:24 +03002726 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002727 &params[pipe]);
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002728
2729 for_each_pipe(pipe) {
2730 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002731 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2732 }
2733}
2734
Paulo Zanoni861f3382013-05-31 10:19:21 -03002735/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2736 * case both are at the same level. Prefer r1 in case they're the same. */
Damien Lespiauf4db9322013-06-24 22:59:50 +01002737static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2738 struct hsw_wm_values *r2)
Paulo Zanoni861f3382013-05-31 10:19:21 -03002739{
2740 int i, val_r1 = 0, val_r2 = 0;
2741
2742 for (i = 0; i < 3; i++) {
2743 if (r1->wm_lp[i] & WM3_LP_EN)
2744 val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2745 if (r2->wm_lp[i] & WM3_LP_EN)
2746 val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2747 }
2748
2749 if (val_r1 == val_r2) {
2750 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2751 return r2;
2752 else
2753 return r1;
2754 } else if (val_r1 > val_r2) {
2755 return r1;
2756 } else {
2757 return r2;
2758 }
2759}
2760
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002761/*
2762 * The spec says we shouldn't write when we don't need, because every write
2763 * causes WMs to be re-evaluated, expending some power.
2764 */
2765static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2766 struct hsw_wm_values *results,
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002767 enum intel_ddb_partitioning partitioning)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002768{
2769 struct hsw_wm_values previous;
2770 uint32_t val;
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002771 enum intel_ddb_partitioning prev_partitioning;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002772 bool prev_enable_fbc_wm;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002773
2774 previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2775 previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2776 previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2777 previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2778 previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2779 previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2780 previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2781 previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2782 previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2783 previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2784 previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2785 previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2786
2787 prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002788 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002789
Paulo Zanonicca32e92013-05-31 11:45:06 -03002790 prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2791
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002792 if (memcmp(results->wm_pipe, previous.wm_pipe,
2793 sizeof(results->wm_pipe)) == 0 &&
2794 memcmp(results->wm_lp, previous.wm_lp,
2795 sizeof(results->wm_lp)) == 0 &&
2796 memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2797 sizeof(results->wm_lp_spr)) == 0 &&
2798 memcmp(results->wm_linetime, previous.wm_linetime,
2799 sizeof(results->wm_linetime)) == 0 &&
Paulo Zanonicca32e92013-05-31 11:45:06 -03002800 partitioning == prev_partitioning &&
2801 results->enable_fbc_wm == prev_enable_fbc_wm)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002802 return;
2803
2804 if (previous.wm_lp[2] != 0)
2805 I915_WRITE(WM3_LP_ILK, 0);
2806 if (previous.wm_lp[1] != 0)
2807 I915_WRITE(WM2_LP_ILK, 0);
2808 if (previous.wm_lp[0] != 0)
2809 I915_WRITE(WM1_LP_ILK, 0);
2810
2811 if (previous.wm_pipe[0] != results->wm_pipe[0])
2812 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2813 if (previous.wm_pipe[1] != results->wm_pipe[1])
2814 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2815 if (previous.wm_pipe[2] != results->wm_pipe[2])
2816 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2817
2818 if (previous.wm_linetime[0] != results->wm_linetime[0])
2819 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2820 if (previous.wm_linetime[1] != results->wm_linetime[1])
2821 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2822 if (previous.wm_linetime[2] != results->wm_linetime[2])
2823 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2824
2825 if (prev_partitioning != partitioning) {
2826 val = I915_READ(WM_MISC);
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002827 if (partitioning == INTEL_DDB_PART_1_2)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002828 val &= ~WM_MISC_DATA_PARTITION_5_6;
2829 else
2830 val |= WM_MISC_DATA_PARTITION_5_6;
2831 I915_WRITE(WM_MISC, val);
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002832 }
2833
Paulo Zanonicca32e92013-05-31 11:45:06 -03002834 if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2835 val = I915_READ(DISP_ARB_CTL);
2836 if (results->enable_fbc_wm)
2837 val &= ~DISP_FBC_WM_DIS;
2838 else
2839 val |= DISP_FBC_WM_DIS;
2840 I915_WRITE(DISP_ARB_CTL, val);
2841 }
2842
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002843 if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2844 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2845 if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2846 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2847 if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2848 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2849
2850 if (results->wm_lp[0] != 0)
2851 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2852 if (results->wm_lp[1] != 0)
2853 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2854 if (results->wm_lp[2] != 0)
2855 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2856}
2857
Ville Syrjälä46ba6142013-09-10 11:40:40 +03002858static void haswell_update_wm(struct drm_crtc *crtc)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002859{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03002860 struct drm_device *dev = crtc->dev;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002861 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni861f3382013-05-31 10:19:21 -03002862 struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002863 struct hsw_pipe_wm_parameters params[3];
Paulo Zanoni861f3382013-05-31 10:19:21 -03002864 struct hsw_wm_values results_1_2, results_5_6, *best_results;
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002865 enum intel_ddb_partitioning partitioning;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002866
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002867 hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
Paulo Zanoni861f3382013-05-31 10:19:21 -03002868
Ville Syrjälä53615a52013-08-01 16:18:50 +03002869 hsw_compute_wm_results(dev, params,
Ville Syrjälä53615a52013-08-01 16:18:50 +03002870 &lp_max_1_2, &results_1_2);
Paulo Zanoni861f3382013-05-31 10:19:21 -03002871 if (lp_max_1_2.pri != lp_max_5_6.pri) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03002872 hsw_compute_wm_results(dev, params,
Ville Syrjälä53615a52013-08-01 16:18:50 +03002873 &lp_max_5_6, &results_5_6);
Paulo Zanoni861f3382013-05-31 10:19:21 -03002874 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2875 } else {
2876 best_results = &results_1_2;
2877 }
2878
2879 partitioning = (best_results == &results_1_2) ?
Ville Syrjälä77c122b2013-08-06 22:24:04 +03002880 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
Paulo Zanoni861f3382013-05-31 10:19:21 -03002881
2882 hsw_write_wm_values(dev_priv, best_results, partitioning);
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002883}
2884
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002885static void haswell_update_sprite_wm(struct drm_plane *plane,
2886 struct drm_crtc *crtc,
Paulo Zanoni526682e2013-05-24 11:59:18 -03002887 uint32_t sprite_width, int pixel_size,
Ville Syrjäläbdd57d02013-07-05 11:57:13 +03002888 bool enabled, bool scaled)
Paulo Zanoni526682e2013-05-24 11:59:18 -03002889{
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002890 struct intel_plane *intel_plane = to_intel_plane(plane);
Paulo Zanoni526682e2013-05-24 11:59:18 -03002891
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002892 intel_plane->wm.enabled = enabled;
2893 intel_plane->wm.scaled = scaled;
2894 intel_plane->wm.horiz_pixels = sprite_width;
2895 intel_plane->wm.bytes_per_pixel = pixel_size;
Paulo Zanoni526682e2013-05-24 11:59:18 -03002896
Ville Syrjälä46ba6142013-09-10 11:40:40 +03002897 haswell_update_wm(crtc);
Paulo Zanoni526682e2013-05-24 11:59:18 -03002898}
2899
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002900static bool
2901sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2902 uint32_t sprite_width, int pixel_size,
2903 const struct intel_watermark_params *display,
2904 int display_latency_ns, int *sprite_wm)
2905{
2906 struct drm_crtc *crtc;
2907 int clock;
2908 int entries, tlb_miss;
2909
2910 crtc = intel_get_crtc_for_plane(dev, plane);
Chris Wilson3490ea52013-01-07 10:11:40 +00002911 if (!intel_crtc_active(crtc)) {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002912 *sprite_wm = display->guard_size;
2913 return false;
2914 }
2915
2916 clock = crtc->mode.clock;
2917
2918 /* Use the small buffer method to calculate the sprite watermark */
2919 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2920 tlb_miss = display->fifo_size*display->cacheline_size -
2921 sprite_width * 8;
2922 if (tlb_miss > 0)
2923 entries += tlb_miss;
2924 entries = DIV_ROUND_UP(entries, display->cacheline_size);
2925 *sprite_wm = entries + display->guard_size;
2926 if (*sprite_wm > (int)display->max_wm)
2927 *sprite_wm = display->max_wm;
2928
2929 return true;
2930}
2931
2932static bool
2933sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2934 uint32_t sprite_width, int pixel_size,
2935 const struct intel_watermark_params *display,
2936 int latency_ns, int *sprite_wm)
2937{
2938 struct drm_crtc *crtc;
2939 unsigned long line_time_us;
2940 int clock;
2941 int line_count, line_size;
2942 int small, large;
2943 int entries;
2944
2945 if (!latency_ns) {
2946 *sprite_wm = 0;
2947 return false;
2948 }
2949
2950 crtc = intel_get_crtc_for_plane(dev, plane);
2951 clock = crtc->mode.clock;
2952 if (!clock) {
2953 *sprite_wm = 0;
2954 return false;
2955 }
2956
2957 line_time_us = (sprite_width * 1000) / clock;
2958 if (!line_time_us) {
2959 *sprite_wm = 0;
2960 return false;
2961 }
2962
2963 line_count = (latency_ns / line_time_us + 1000) / 1000;
2964 line_size = sprite_width * pixel_size;
2965
2966 /* Use the minimum of the small and large buffer method for primary */
2967 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
2968 large = line_count * line_size;
2969
2970 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
2971 *sprite_wm = entries + display->guard_size;
2972
2973 return *sprite_wm > 0x3ff ? false : true;
2974}
2975
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002976static void sandybridge_update_sprite_wm(struct drm_plane *plane,
2977 struct drm_crtc *crtc,
Paulo Zanoni4c4ff432013-05-24 11:59:17 -03002978 uint32_t sprite_width, int pixel_size,
Ville Syrjälä39db4a42013-08-06 22:24:00 +03002979 bool enabled, bool scaled)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002980{
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002981 struct drm_device *dev = plane->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002982 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläadf3d352013-08-06 22:24:11 +03002983 int pipe = to_intel_plane(plane)->pipe;
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03002984 int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002985 u32 val;
2986 int sprite_wm, reg;
2987 int ret;
2988
Ville Syrjälä39db4a42013-08-06 22:24:00 +03002989 if (!enabled)
Paulo Zanoni4c4ff432013-05-24 11:59:17 -03002990 return;
2991
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03002992 switch (pipe) {
2993 case 0:
2994 reg = WM0_PIPEA_ILK;
2995 break;
2996 case 1:
2997 reg = WM0_PIPEB_ILK;
2998 break;
2999 case 2:
3000 reg = WM0_PIPEC_IVB;
3001 break;
3002 default:
3003 return; /* bad pipe */
3004 }
3005
3006 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
3007 &sandybridge_display_wm_info,
3008 latency, &sprite_wm);
3009 if (!ret) {
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03003010 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
3011 pipe_name(pipe));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003012 return;
3013 }
3014
3015 val = I915_READ(reg);
3016 val &= ~WM0_PIPE_SPRITE_MASK;
3017 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03003018 DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003019
3020
3021 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3022 pixel_size,
3023 &sandybridge_display_srwm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03003024 dev_priv->wm.spr_latency[1] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003025 &sprite_wm);
3026 if (!ret) {
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03003027 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
3028 pipe_name(pipe));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003029 return;
3030 }
3031 I915_WRITE(WM1S_LP_ILK, sprite_wm);
3032
3033 /* Only IVB has two more LP watermarks for sprite */
3034 if (!IS_IVYBRIDGE(dev))
3035 return;
3036
3037 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3038 pixel_size,
3039 &sandybridge_display_srwm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03003040 dev_priv->wm.spr_latency[2] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003041 &sprite_wm);
3042 if (!ret) {
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03003043 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
3044 pipe_name(pipe));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003045 return;
3046 }
3047 I915_WRITE(WM2S_LP_IVB, sprite_wm);
3048
3049 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3050 pixel_size,
3051 &sandybridge_display_srwm_info,
Ville Syrjäläb0aea5d2013-08-01 16:18:54 +03003052 dev_priv->wm.spr_latency[3] * 500,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003053 &sprite_wm);
3054 if (!ret) {
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03003055 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
3056 pipe_name(pipe));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003057 return;
3058 }
3059 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3060}
3061
3062/**
3063 * intel_update_watermarks - update FIFO watermark values based on current modes
3064 *
3065 * Calculate watermark values for the various WM regs based on current mode
3066 * and plane configuration.
3067 *
3068 * There are several cases to deal with here:
3069 * - normal (i.e. non-self-refresh)
3070 * - self-refresh (SR) mode
3071 * - lines are large relative to FIFO size (buffer can hold up to 2)
3072 * - lines are small relative to FIFO size (buffer can hold more than 2
3073 * lines), so need to account for TLB latency
3074 *
3075 * The normal calculation is:
3076 * watermark = dotclock * bytes per pixel * latency
3077 * where latency is platform & configuration dependent (we assume pessimal
3078 * values here).
3079 *
3080 * The SR calculation is:
3081 * watermark = (trunc(latency/line time)+1) * surface width *
3082 * bytes per pixel
3083 * where
3084 * line time = htotal / dotclock
3085 * surface width = hdisplay for normal plane and 64 for cursor
3086 * and latency is assumed to be high, as above.
3087 *
3088 * The final value programmed to the register should always be rounded up,
3089 * and include an extra 2 entries to account for clock crossings.
3090 *
3091 * We don't use the sprite, so we can ignore that. And on Crestline we have
3092 * to set the non-SR watermarks to 8.
3093 */
Ville Syrjälä46ba6142013-09-10 11:40:40 +03003094void intel_update_watermarks(struct drm_crtc *crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003095{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03003096 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003097
3098 if (dev_priv->display.update_wm)
Ville Syrjälä46ba6142013-09-10 11:40:40 +03003099 dev_priv->display.update_wm(crtc);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003100}
3101
Ville Syrjäläadf3d352013-08-06 22:24:11 +03003102void intel_update_sprite_watermarks(struct drm_plane *plane,
3103 struct drm_crtc *crtc,
Paulo Zanoni4c4ff432013-05-24 11:59:17 -03003104 uint32_t sprite_width, int pixel_size,
Ville Syrjälä39db4a42013-08-06 22:24:00 +03003105 bool enabled, bool scaled)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003106{
Ville Syrjäläadf3d352013-08-06 22:24:11 +03003107 struct drm_i915_private *dev_priv = plane->dev->dev_private;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003108
3109 if (dev_priv->display.update_sprite_wm)
Ville Syrjäläadf3d352013-08-06 22:24:11 +03003110 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
Ville Syrjälä39db4a42013-08-06 22:24:00 +03003111 pixel_size, enabled, scaled);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03003112}
3113
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003114static struct drm_i915_gem_object *
3115intel_alloc_context_page(struct drm_device *dev)
3116{
3117 struct drm_i915_gem_object *ctx;
3118 int ret;
3119
3120 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3121
3122 ctx = i915_gem_alloc_object(dev, 4096);
3123 if (!ctx) {
3124 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
3125 return NULL;
3126 }
3127
Ben Widawskyc37e2202013-07-31 16:59:58 -07003128 ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003129 if (ret) {
3130 DRM_ERROR("failed to pin power context: %d\n", ret);
3131 goto err_unref;
3132 }
3133
3134 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
3135 if (ret) {
3136 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
3137 goto err_unpin;
3138 }
3139
3140 return ctx;
3141
3142err_unpin:
3143 i915_gem_object_unpin(ctx);
3144err_unref:
3145 drm_gem_object_unreference(&ctx->base);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003146 return NULL;
3147}
3148
Daniel Vetter92703882012-08-09 16:46:01 +02003149/**
3150 * Lock protecting IPS related data structures
Daniel Vetter92703882012-08-09 16:46:01 +02003151 */
3152DEFINE_SPINLOCK(mchdev_lock);
3153
3154/* Global for IPS driver to get at the current i915 device. Protected by
3155 * mchdev_lock. */
3156static struct drm_i915_private *i915_mch_dev;
3157
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003158bool ironlake_set_drps(struct drm_device *dev, u8 val)
3159{
3160 struct drm_i915_private *dev_priv = dev->dev_private;
3161 u16 rgvswctl;
3162
Daniel Vetter92703882012-08-09 16:46:01 +02003163 assert_spin_locked(&mchdev_lock);
3164
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003165 rgvswctl = I915_READ16(MEMSWCTL);
3166 if (rgvswctl & MEMCTL_CMD_STS) {
3167 DRM_DEBUG("gpu busy, RCS change rejected\n");
3168 return false; /* still busy with another command */
3169 }
3170
3171 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
3172 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
3173 I915_WRITE16(MEMSWCTL, rgvswctl);
3174 POSTING_READ16(MEMSWCTL);
3175
3176 rgvswctl |= MEMCTL_CMD_STS;
3177 I915_WRITE16(MEMSWCTL, rgvswctl);
3178
3179 return true;
3180}
3181
Daniel Vetter8090c6b2012-06-24 16:42:32 +02003182static void ironlake_enable_drps(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003183{
3184 struct drm_i915_private *dev_priv = dev->dev_private;
3185 u32 rgvmodectl = I915_READ(MEMMODECTL);
3186 u8 fmax, fmin, fstart, vstart;
3187
Daniel Vetter92703882012-08-09 16:46:01 +02003188 spin_lock_irq(&mchdev_lock);
3189
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003190 /* Enable temp reporting */
3191 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
3192 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
3193
3194 /* 100ms RC evaluation intervals */
3195 I915_WRITE(RCUPEI, 100000);
3196 I915_WRITE(RCDNEI, 100000);
3197
3198 /* Set max/min thresholds to 90ms and 80ms respectively */
3199 I915_WRITE(RCBMAXAVG, 90000);
3200 I915_WRITE(RCBMINAVG, 80000);
3201
3202 I915_WRITE(MEMIHYST, 1);
3203
3204 /* Set up min, max, and cur for interrupt handling */
3205 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
3206 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
3207 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
3208 MEMMODE_FSTART_SHIFT;
3209
3210 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3211 PXVFREQ_PX_SHIFT;
3212
Daniel Vetter20e4d402012-08-08 23:35:39 +02003213 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3214 dev_priv->ips.fstart = fstart;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003215
Daniel Vetter20e4d402012-08-08 23:35:39 +02003216 dev_priv->ips.max_delay = fstart;
3217 dev_priv->ips.min_delay = fmin;
3218 dev_priv->ips.cur_delay = fstart;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003219
3220 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3221 fmax, fmin, fstart);
3222
3223 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3224
3225 /*
3226 * Interrupts will be enabled in ironlake_irq_postinstall
3227 */
3228
3229 I915_WRITE(VIDSTART, vstart);
3230 POSTING_READ(VIDSTART);
3231
3232 rgvmodectl |= MEMMODE_SWMODE_EN;
3233 I915_WRITE(MEMMODECTL, rgvmodectl);
3234
Daniel Vetter92703882012-08-09 16:46:01 +02003235 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003236 DRM_ERROR("stuck trying to change perf mode\n");
Daniel Vetter92703882012-08-09 16:46:01 +02003237 mdelay(1);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003238
3239 ironlake_set_drps(dev, fstart);
3240
Daniel Vetter20e4d402012-08-08 23:35:39 +02003241 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003242 I915_READ(0x112e0);
Daniel Vetter20e4d402012-08-08 23:35:39 +02003243 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3244 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3245 getrawmonotonic(&dev_priv->ips.last_time2);
Daniel Vetter92703882012-08-09 16:46:01 +02003246
3247 spin_unlock_irq(&mchdev_lock);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003248}
3249
Daniel Vetter8090c6b2012-06-24 16:42:32 +02003250static void ironlake_disable_drps(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003251{
3252 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter92703882012-08-09 16:46:01 +02003253 u16 rgvswctl;
3254
3255 spin_lock_irq(&mchdev_lock);
3256
3257 rgvswctl = I915_READ16(MEMSWCTL);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003258
3259 /* Ack interrupts, disable EFC interrupt */
3260 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3261 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3262 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3263 I915_WRITE(DEIIR, DE_PCU_EVENT);
3264 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3265
3266 /* Go back to the starting frequency */
Daniel Vetter20e4d402012-08-08 23:35:39 +02003267 ironlake_set_drps(dev, dev_priv->ips.fstart);
Daniel Vetter92703882012-08-09 16:46:01 +02003268 mdelay(1);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003269 rgvswctl |= MEMCTL_CMD_STS;
3270 I915_WRITE(MEMSWCTL, rgvswctl);
Daniel Vetter92703882012-08-09 16:46:01 +02003271 mdelay(1);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003272
Daniel Vetter92703882012-08-09 16:46:01 +02003273 spin_unlock_irq(&mchdev_lock);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003274}
3275
Daniel Vetteracbe9472012-07-26 11:50:05 +02003276/* There's a funny hw issue where the hw returns all 0 when reading from
3277 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3278 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3279 * all limits and the gpu stuck at whatever frequency it is at atm).
3280 */
Daniel Vetter65bccb52012-08-08 17:42:52 +02003281static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003282{
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003283 u32 limits;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003284
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003285 limits = 0;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003286
3287 if (*val >= dev_priv->rps.max_delay)
3288 *val = dev_priv->rps.max_delay;
3289 limits |= dev_priv->rps.max_delay << 24;
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003290
Daniel Vetter20b46e52012-07-26 11:16:14 +02003291 /* Only set the down limit when we've reached the lowest level to avoid
3292 * getting more interrupts, otherwise leave this clear. This prevents a
3293 * race in the hw when coming out of rc6: There's a tiny window where
3294 * the hw runs at the minimal clock before selecting the desired
3295 * frequency, if the down threshold expires in that window we will not
3296 * receive a down interrupt. */
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003297 if (*val <= dev_priv->rps.min_delay) {
3298 *val = dev_priv->rps.min_delay;
3299 limits |= dev_priv->rps.min_delay << 16;
Daniel Vetter20b46e52012-07-26 11:16:14 +02003300 }
3301
3302 return limits;
3303}
3304
3305void gen6_set_rps(struct drm_device *dev, u8 val)
3306{
3307 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter65bccb52012-08-08 17:42:52 +02003308 u32 limits = gen6_rps_limits(dev_priv, &val);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003309
Jesse Barnes4fc688c2012-11-02 11:14:01 -07003310 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Ben Widawsky79249632012-09-07 19:43:42 -07003311 WARN_ON(val > dev_priv->rps.max_delay);
3312 WARN_ON(val < dev_priv->rps.min_delay);
Daniel Vetter004777c2012-08-09 15:07:01 +02003313
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003314 if (val == dev_priv->rps.cur_delay)
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003315 return;
3316
Rodrigo Vivi92bd1bf2013-03-25 17:55:49 -03003317 if (IS_HASWELL(dev))
3318 I915_WRITE(GEN6_RPNSWREQ,
3319 HSW_FREQUENCY(val));
3320 else
3321 I915_WRITE(GEN6_RPNSWREQ,
3322 GEN6_FREQUENCY(val) |
3323 GEN6_OFFSET(0) |
3324 GEN6_AGGRESSIVE_TURBO);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003325
3326 /* Make sure we continue to get interrupts
3327 * until we hit the minimum or maximum frequencies.
3328 */
3329 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3330
Ben Widawskyd5570a72012-09-07 19:43:41 -07003331 POSTING_READ(GEN6_RPNSWREQ);
3332
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003333 dev_priv->rps.cur_delay = val;
Daniel Vetterbe2cde92012-08-30 13:26:48 +02003334
3335 trace_intel_gpu_freq_change(val * 50);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003336}
3337
Ville Syrjälä80814ae2013-06-25 19:21:02 +03003338/*
3339 * Wait until the previous freq change has completed,
3340 * or the timeout elapsed, and then update our notion
3341 * of the current GPU frequency.
3342 */
3343static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3344{
Ville Syrjälä80814ae2013-06-25 19:21:02 +03003345 u32 pval;
3346
3347 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3348
Ville Syrjäläe8474402013-06-26 17:43:24 +03003349 if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
3350 DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
Ville Syrjälä80814ae2013-06-25 19:21:02 +03003351
3352 pval >>= 8;
3353
3354 if (pval != dev_priv->rps.cur_delay)
3355 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
3356 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3357 dev_priv->rps.cur_delay,
3358 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3359
3360 dev_priv->rps.cur_delay = pval;
3361}
3362
Jesse Barnes0a073b82013-04-17 15:54:58 -07003363void valleyview_set_rps(struct drm_device *dev, u8 val)
3364{
3365 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä7a670922013-06-25 19:21:06 +03003366
3367 gen6_rps_limits(dev_priv, &val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003368
3369 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3370 WARN_ON(val > dev_priv->rps.max_delay);
3371 WARN_ON(val < dev_priv->rps.min_delay);
3372
Ville Syrjälä80814ae2013-06-25 19:21:02 +03003373 vlv_update_rps_cur_delay(dev_priv);
3374
Ville Syrjälä73008b92013-06-25 19:21:01 +03003375 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
Jesse Barnes0a073b82013-04-17 15:54:58 -07003376 vlv_gpu_freq(dev_priv->mem_freq,
3377 dev_priv->rps.cur_delay),
Ville Syrjälä73008b92013-06-25 19:21:01 +03003378 dev_priv->rps.cur_delay,
3379 vlv_gpu_freq(dev_priv->mem_freq, val), val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003380
3381 if (val == dev_priv->rps.cur_delay)
3382 return;
3383
Jani Nikulaae992582013-05-22 15:36:19 +03003384 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003385
Ville Syrjälä80814ae2013-06-25 19:21:02 +03003386 dev_priv->rps.cur_delay = val;
Jesse Barnes0a073b82013-04-17 15:54:58 -07003387
3388 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3389}
3390
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003391static void gen6_disable_rps_interrupts(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003392{
3393 struct drm_i915_private *dev_priv = dev->dev_private;
3394
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003395 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
Ben Widawsky48484052013-05-28 19:22:27 -07003396 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003397 /* Complete PM interrupt masking here doesn't race with the rps work
3398 * item again unmasking PM interrupts because that is using a different
3399 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3400 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3401
Daniel Vetter59cdb632013-07-04 23:35:28 +02003402 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003403 dev_priv->rps.pm_iir = 0;
Daniel Vetter59cdb632013-07-04 23:35:28 +02003404 spin_unlock_irq(&dev_priv->irq_lock);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003405
Ben Widawsky48484052013-05-28 19:22:27 -07003406 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003407}
3408
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003409static void gen6_disable_rps(struct drm_device *dev)
3410{
3411 struct drm_i915_private *dev_priv = dev->dev_private;
3412
3413 I915_WRITE(GEN6_RC_CONTROL, 0);
3414 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3415
3416 gen6_disable_rps_interrupts(dev);
3417}
3418
Jesse Barnesd20d4f02013-04-23 10:09:28 -07003419static void valleyview_disable_rps(struct drm_device *dev)
3420{
3421 struct drm_i915_private *dev_priv = dev->dev_private;
3422
3423 I915_WRITE(GEN6_RC_CONTROL, 0);
Jesse Barnesd20d4f02013-04-23 10:09:28 -07003424
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003425 gen6_disable_rps_interrupts(dev);
Jesse Barnesc9cddff2013-05-08 10:45:13 -07003426
3427 if (dev_priv->vlv_pctx) {
3428 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3429 dev_priv->vlv_pctx = NULL;
3430 }
Jesse Barnesd20d4f02013-04-23 10:09:28 -07003431}
3432
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003433int intel_enable_rc6(const struct drm_device *dev)
3434{
Damien Lespiaueb4926e2013-06-07 17:41:14 +01003435 /* No RC6 before Ironlake */
3436 if (INTEL_INFO(dev)->gen < 5)
3437 return 0;
3438
Daniel Vetter456470e2012-08-08 23:35:40 +02003439 /* Respect the kernel parameter if it is set */
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003440 if (i915_enable_rc6 >= 0)
3441 return i915_enable_rc6;
3442
Chris Wilson6567d742012-11-10 10:00:06 +00003443 /* Disable RC6 on Ironlake */
3444 if (INTEL_INFO(dev)->gen == 5)
3445 return 0;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003446
Daniel Vetter456470e2012-08-08 23:35:40 +02003447 if (IS_HASWELL(dev)) {
3448 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3449 return INTEL_RC6_ENABLE;
3450 }
3451
3452 /* snb/ivb have more than one rc6 state. */
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003453 if (INTEL_INFO(dev)->gen == 6) {
3454 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3455 return INTEL_RC6_ENABLE;
3456 }
Daniel Vetter456470e2012-08-08 23:35:40 +02003457
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003458 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3459 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3460}
3461
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003462static void gen6_enable_rps_interrupts(struct drm_device *dev)
3463{
3464 struct drm_i915_private *dev_priv = dev->dev_private;
Mika Kuoppalaa9c1f902013-08-22 21:09:00 +03003465 u32 enabled_intrs;
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003466
3467 spin_lock_irq(&dev_priv->irq_lock);
Daniel Vettera0b33352013-07-04 23:35:34 +02003468 WARN_ON(dev_priv->rps.pm_iir);
Paulo Zanoniedbfdb42013-08-06 18:57:13 -03003469 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003470 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3471 spin_unlock_irq(&dev_priv->irq_lock);
Mika Kuoppalaa9c1f902013-08-22 21:09:00 +03003472
Vinit Azadfd547d22013-08-14 13:34:33 -07003473 /* only unmask PM interrupts we need. Mask all others. */
Mika Kuoppalaa9c1f902013-08-22 21:09:00 +03003474 enabled_intrs = GEN6_PM_RPS_EVENTS;
3475
3476 /* IVB and SNB hard hangs on looping batchbuffer
3477 * if GEN6_PM_UP_EI_EXPIRED is masked.
3478 */
3479 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3480 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3481
3482 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003483}
3484
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003485static void gen6_enable_rps(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003486{
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003487 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01003488 struct intel_ring_buffer *ring;
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003489 u32 rp_state_cap;
3490 u32 gt_perf_status;
Ben Widawsky31643d52012-09-26 10:34:01 -07003491 u32 rc6vids, pcu_mbox, rc6_mask = 0;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003492 u32 gtfifodbg;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003493 int rc6_mode;
Ben Widawsky42c05262012-09-26 10:34:00 -07003494 int i, ret;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003495
Jesse Barnes4fc688c2012-11-02 11:14:01 -07003496 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003497
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003498 /* Here begins a magic sequence of register writes to enable
3499 * auto-downclocking.
3500 *
3501 * Perhaps there might be some value in exposing these to
3502 * userspace...
3503 */
3504 I915_WRITE(GEN6_RC_STATE, 0);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003505
3506 /* Clear the DBG now so we don't confuse earlier errors */
3507 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3508 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3509 I915_WRITE(GTFIFODBG, gtfifodbg);
3510 }
3511
3512 gen6_gt_force_wake_get(dev_priv);
3513
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003514 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3515 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3516
Ben Widawsky31c77382013-04-05 14:29:22 -07003517 /* In units of 50MHz */
3518 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003519 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
3520 dev_priv->rps.cur_delay = 0;
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003521
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003522 /* disable the counters and set deterministic thresholds */
3523 I915_WRITE(GEN6_RC_CONTROL, 0);
3524
3525 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3526 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3527 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3528 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3529 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3530
Chris Wilsonb4519512012-05-11 14:29:30 +01003531 for_each_ring(ring, dev_priv, i)
3532 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003533
3534 I915_WRITE(GEN6_RC_SLEEP, 0);
3535 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
Stéphane Marchesin351aa562013-08-13 11:55:17 -07003536 if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
3537 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3538 else
3539 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
Stéphane Marchesin0920a482013-01-29 19:41:59 -08003540 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003541 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3542
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003543 /* Check if we are enabling RC6 */
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003544 rc6_mode = intel_enable_rc6(dev_priv->dev);
3545 if (rc6_mode & INTEL_RC6_ENABLE)
3546 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3547
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003548 /* We don't use those on Haswell */
3549 if (!IS_HASWELL(dev)) {
3550 if (rc6_mode & INTEL_RC6p_ENABLE)
3551 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003552
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003553 if (rc6_mode & INTEL_RC6pp_ENABLE)
3554 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3555 }
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003556
3557 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003558 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3559 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3560 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003561
3562 I915_WRITE(GEN6_RC_CONTROL,
3563 rc6_mask |
3564 GEN6_RC_CTL_EI_MODE(1) |
3565 GEN6_RC_CTL_HW_ENABLE);
3566
Rodrigo Vivi92bd1bf2013-03-25 17:55:49 -03003567 if (IS_HASWELL(dev)) {
3568 I915_WRITE(GEN6_RPNSWREQ,
3569 HSW_FREQUENCY(10));
3570 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3571 HSW_FREQUENCY(12));
3572 } else {
3573 I915_WRITE(GEN6_RPNSWREQ,
3574 GEN6_FREQUENCY(10) |
3575 GEN6_OFFSET(0) |
3576 GEN6_AGGRESSIVE_TURBO);
3577 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3578 GEN6_FREQUENCY(12));
3579 }
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003580
3581 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3582 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003583 dev_priv->rps.max_delay << 24 |
3584 dev_priv->rps.min_delay << 16);
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003585
Daniel Vetter1ee9ae32012-08-15 10:41:45 +02003586 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3587 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3588 I915_WRITE(GEN6_RP_UP_EI, 66000);
3589 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003590
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003591 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3592 I915_WRITE(GEN6_RP_CONTROL,
3593 GEN6_RP_MEDIA_TURBO |
Jesse Barnes89ba8292012-05-22 09:30:33 -07003594 GEN6_RP_MEDIA_HW_NORMAL_MODE |
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003595 GEN6_RP_MEDIA_IS_GFX |
3596 GEN6_RP_ENABLE |
3597 GEN6_RP_UP_BUSY_AVG |
Eugeni Dodonov5a7dc922012-07-02 11:51:05 -03003598 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003599
Ben Widawsky42c05262012-09-26 10:34:00 -07003600 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
Ben Widawsky988b36e2013-04-23 17:33:02 -07003601 if (!ret) {
Ben Widawsky42c05262012-09-26 10:34:00 -07003602 pcu_mbox = 0;
3603 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
Ben Widawskya2b3fc02013-03-19 20:19:56 -07003604 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
Ben Widawsky10e08492013-04-05 14:29:23 -07003605 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
Ben Widawskya2b3fc02013-03-19 20:19:56 -07003606 (dev_priv->rps.max_delay & 0xff) * 50,
3607 (pcu_mbox & 0xff) * 50);
Ben Widawsky31c77382013-04-05 14:29:22 -07003608 dev_priv->rps.hw_max = pcu_mbox & 0xff;
Ben Widawsky42c05262012-09-26 10:34:00 -07003609 }
3610 } else {
3611 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003612 }
3613
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01003614 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003615
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003616 gen6_enable_rps_interrupts(dev);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003617
Ben Widawsky31643d52012-09-26 10:34:01 -07003618 rc6vids = 0;
3619 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3620 if (IS_GEN6(dev) && ret) {
3621 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3622 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3623 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3624 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3625 rc6vids &= 0xffff00;
3626 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3627 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3628 if (ret)
3629 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3630 }
3631
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003632 gen6_gt_force_wake_put(dev_priv);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003633}
3634
Paulo Zanonic67a4702013-08-19 13:18:09 -03003635void gen6_update_ring_freq(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003636{
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003637 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003638 int min_freq = 15;
Chris Wilson3ebecd02013-04-12 19:10:13 +01003639 unsigned int gpu_freq;
3640 unsigned int max_ia_freq, min_ring_freq;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003641 int scaling_factor = 180;
3642
Jesse Barnes4fc688c2012-11-02 11:14:01 -07003643 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003644
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003645 max_ia_freq = cpufreq_quick_get_max(0);
3646 /*
3647 * Default to measured freq if none found, PCU will ensure we don't go
3648 * over
3649 */
3650 if (!max_ia_freq)
3651 max_ia_freq = tsc_khz;
3652
3653 /* Convert from kHz to MHz */
3654 max_ia_freq /= 1000;
3655
Chris Wilson3ebecd02013-04-12 19:10:13 +01003656 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
3657 /* convert DDR frequency from units of 133.3MHz to bandwidth */
3658 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
3659
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003660 /*
3661 * For each potential GPU frequency, load a ring frequency we'd like
3662 * to use for memory access. We do this by specifying the IA frequency
3663 * the PCU should use as a reference to determine the ring frequency.
3664 */
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003665 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003666 gpu_freq--) {
Daniel Vetterc6a828d2012-08-08 23:35:35 +02003667 int diff = dev_priv->rps.max_delay - gpu_freq;
Chris Wilson3ebecd02013-04-12 19:10:13 +01003668 unsigned int ia_freq = 0, ring_freq = 0;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003669
Chris Wilson3ebecd02013-04-12 19:10:13 +01003670 if (IS_HASWELL(dev)) {
3671 ring_freq = (gpu_freq * 5 + 3) / 4;
3672 ring_freq = max(min_ring_freq, ring_freq);
3673 /* leave ia_freq as the default, chosen by cpufreq */
3674 } else {
3675 /* On older processors, there is no separate ring
3676 * clock domain, so in order to boost the bandwidth
3677 * of the ring, we need to upclock the CPU (ia_freq).
3678 *
3679 * For GPU frequencies less than 750MHz,
3680 * just use the lowest ring freq.
3681 */
3682 if (gpu_freq < min_freq)
3683 ia_freq = 800;
3684 else
3685 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3686 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3687 }
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003688
Ben Widawsky42c05262012-09-26 10:34:00 -07003689 sandybridge_pcode_write(dev_priv,
3690 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
Chris Wilson3ebecd02013-04-12 19:10:13 +01003691 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3692 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3693 gpu_freq);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003694 }
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003695}
3696
Jesse Barnes0a073b82013-04-17 15:54:58 -07003697int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3698{
3699 u32 val, rp0;
3700
Jani Nikula64936252013-05-22 15:36:20 +03003701 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003702
3703 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3704 /* Clamp to max */
3705 rp0 = min_t(u32, rp0, 0xea);
3706
3707 return rp0;
3708}
3709
3710static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3711{
3712 u32 val, rpe;
3713
Jani Nikula64936252013-05-22 15:36:20 +03003714 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003715 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
Jani Nikula64936252013-05-22 15:36:20 +03003716 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003717 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3718
3719 return rpe;
3720}
3721
3722int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3723{
Jani Nikula64936252013-05-22 15:36:20 +03003724 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
Jesse Barnes0a073b82013-04-17 15:54:58 -07003725}
3726
Jesse Barnes52ceb902013-04-23 10:09:26 -07003727static void vlv_rps_timer_work(struct work_struct *work)
3728{
3729 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3730 rps.vlv_work.work);
3731
3732 /*
3733 * Timer fired, we must be idle. Drop to min voltage state.
3734 * Note: we use RPe here since it should match the
3735 * Vmin we were shooting for. That should give us better
3736 * perf when we come back out of RC6 than if we used the
3737 * min freq available.
3738 */
3739 mutex_lock(&dev_priv->rps.hw_lock);
Ville Syrjälä6dc58482013-06-25 21:38:10 +03003740 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3741 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
Jesse Barnes52ceb902013-04-23 10:09:26 -07003742 mutex_unlock(&dev_priv->rps.hw_lock);
3743}
3744
Jesse Barnesc9cddff2013-05-08 10:45:13 -07003745static void valleyview_setup_pctx(struct drm_device *dev)
3746{
3747 struct drm_i915_private *dev_priv = dev->dev_private;
3748 struct drm_i915_gem_object *pctx;
3749 unsigned long pctx_paddr;
3750 u32 pcbr;
3751 int pctx_size = 24*1024;
3752
3753 pcbr = I915_READ(VLV_PCBR);
3754 if (pcbr) {
3755 /* BIOS set it up already, grab the pre-alloc'd space */
3756 int pcbr_offset;
3757
3758 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3759 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3760 pcbr_offset,
Daniel Vetter190d6cd2013-07-04 13:06:28 +02003761 I915_GTT_OFFSET_NONE,
Jesse Barnesc9cddff2013-05-08 10:45:13 -07003762 pctx_size);
3763 goto out;
3764 }
3765
3766 /*
3767 * From the Gunit register HAS:
3768 * The Gfx driver is expected to program this register and ensure
3769 * proper allocation within Gfx stolen memory. For example, this
3770 * register should be programmed such than the PCBR range does not
3771 * overlap with other ranges, such as the frame buffer, protected
3772 * memory, or any other relevant ranges.
3773 */
3774 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3775 if (!pctx) {
3776 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3777 return;
3778 }
3779
3780 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3781 I915_WRITE(VLV_PCBR, pctx_paddr);
3782
3783out:
3784 dev_priv->vlv_pctx = pctx;
3785}
3786
Jesse Barnes0a073b82013-04-17 15:54:58 -07003787static void valleyview_enable_rps(struct drm_device *dev)
3788{
3789 struct drm_i915_private *dev_priv = dev->dev_private;
3790 struct intel_ring_buffer *ring;
Ville Syrjälä73008b92013-06-25 19:21:01 +03003791 u32 gtfifodbg, val;
Jesse Barnes0a073b82013-04-17 15:54:58 -07003792 int i;
3793
3794 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3795
3796 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3797 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3798 I915_WRITE(GTFIFODBG, gtfifodbg);
3799 }
3800
Jesse Barnesc9cddff2013-05-08 10:45:13 -07003801 valleyview_setup_pctx(dev);
3802
Jesse Barnes0a073b82013-04-17 15:54:58 -07003803 gen6_gt_force_wake_get(dev_priv);
3804
3805 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3806 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3807 I915_WRITE(GEN6_RP_UP_EI, 66000);
3808 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3809
3810 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3811
3812 I915_WRITE(GEN6_RP_CONTROL,
3813 GEN6_RP_MEDIA_TURBO |
3814 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3815 GEN6_RP_MEDIA_IS_GFX |
3816 GEN6_RP_ENABLE |
3817 GEN6_RP_UP_BUSY_AVG |
3818 GEN6_RP_DOWN_IDLE_CONT);
3819
3820 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3821 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3822 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3823
3824 for_each_ring(ring, dev_priv, i)
3825 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3826
3827 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3828
3829 /* allows RC6 residency counter to work */
3830 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3831 I915_WRITE(GEN6_RC_CONTROL,
3832 GEN7_RC_CTL_TO_MODE);
3833
Jani Nikula64936252013-05-22 15:36:20 +03003834 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
Jesse Barnes24459662013-05-02 10:48:08 -07003835 switch ((val >> 6) & 3) {
3836 case 0:
3837 case 1:
3838 dev_priv->mem_freq = 800;
3839 break;
3840 case 2:
3841 dev_priv->mem_freq = 1066;
3842 break;
3843 case 3:
3844 dev_priv->mem_freq = 1333;
3845 break;
3846 }
Jesse Barnes0a073b82013-04-17 15:54:58 -07003847 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
3848
3849 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3850 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3851
Jesse Barnes0a073b82013-04-17 15:54:58 -07003852 dev_priv->rps.cur_delay = (val >> 8) & 0xff;
Ville Syrjälä73008b92013-06-25 19:21:01 +03003853 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3854 vlv_gpu_freq(dev_priv->mem_freq,
3855 dev_priv->rps.cur_delay),
3856 dev_priv->rps.cur_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003857
3858 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3859 dev_priv->rps.hw_max = dev_priv->rps.max_delay;
Ville Syrjälä73008b92013-06-25 19:21:01 +03003860 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3861 vlv_gpu_freq(dev_priv->mem_freq,
3862 dev_priv->rps.max_delay),
3863 dev_priv->rps.max_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003864
Ville Syrjälä73008b92013-06-25 19:21:01 +03003865 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3866 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3867 vlv_gpu_freq(dev_priv->mem_freq,
3868 dev_priv->rps.rpe_delay),
3869 dev_priv->rps.rpe_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003870
Ville Syrjälä73008b92013-06-25 19:21:01 +03003871 dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3872 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3873 vlv_gpu_freq(dev_priv->mem_freq,
3874 dev_priv->rps.min_delay),
3875 dev_priv->rps.min_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003876
Ville Syrjälä73008b92013-06-25 19:21:01 +03003877 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3878 vlv_gpu_freq(dev_priv->mem_freq,
3879 dev_priv->rps.rpe_delay),
3880 dev_priv->rps.rpe_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003881
Jesse Barnes52ceb902013-04-23 10:09:26 -07003882 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3883
Ville Syrjälä73008b92013-06-25 19:21:01 +03003884 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003885
Daniel Vetter44fc7d52013-07-12 22:43:27 +02003886 gen6_enable_rps_interrupts(dev);
Jesse Barnes0a073b82013-04-17 15:54:58 -07003887
3888 gen6_gt_force_wake_put(dev_priv);
3889}
3890
Daniel Vetter930ebb42012-06-29 23:32:16 +02003891void ironlake_teardown_rc6(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003892{
3893 struct drm_i915_private *dev_priv = dev->dev_private;
3894
Daniel Vetter3e373942012-11-02 19:55:04 +01003895 if (dev_priv->ips.renderctx) {
3896 i915_gem_object_unpin(dev_priv->ips.renderctx);
3897 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3898 dev_priv->ips.renderctx = NULL;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003899 }
3900
Daniel Vetter3e373942012-11-02 19:55:04 +01003901 if (dev_priv->ips.pwrctx) {
3902 i915_gem_object_unpin(dev_priv->ips.pwrctx);
3903 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3904 dev_priv->ips.pwrctx = NULL;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003905 }
3906}
3907
Daniel Vetter930ebb42012-06-29 23:32:16 +02003908static void ironlake_disable_rc6(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003909{
3910 struct drm_i915_private *dev_priv = dev->dev_private;
3911
3912 if (I915_READ(PWRCTXA)) {
3913 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3914 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3915 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3916 50);
3917
3918 I915_WRITE(PWRCTXA, 0);
3919 POSTING_READ(PWRCTXA);
3920
3921 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3922 POSTING_READ(RSTDBYCTL);
3923 }
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003924}
3925
3926static int ironlake_setup_rc6(struct drm_device *dev)
3927{
3928 struct drm_i915_private *dev_priv = dev->dev_private;
3929
Daniel Vetter3e373942012-11-02 19:55:04 +01003930 if (dev_priv->ips.renderctx == NULL)
3931 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3932 if (!dev_priv->ips.renderctx)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003933 return -ENOMEM;
3934
Daniel Vetter3e373942012-11-02 19:55:04 +01003935 if (dev_priv->ips.pwrctx == NULL)
3936 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3937 if (!dev_priv->ips.pwrctx) {
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003938 ironlake_teardown_rc6(dev);
3939 return -ENOMEM;
3940 }
3941
3942 return 0;
3943}
3944
Daniel Vetter930ebb42012-06-29 23:32:16 +02003945static void ironlake_enable_rc6(struct drm_device *dev)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003946{
3947 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter6d90c952012-04-26 23:28:05 +02003948 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
Chris Wilson3e960502012-11-27 16:22:54 +00003949 bool was_interruptible;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003950 int ret;
3951
3952 /* rc6 disabled by default due to repeated reports of hanging during
3953 * boot and resume.
3954 */
3955 if (!intel_enable_rc6(dev))
3956 return;
3957
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003958 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3959
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003960 ret = ironlake_setup_rc6(dev);
Daniel Vetter79f5b2c2012-06-24 16:42:33 +02003961 if (ret)
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003962 return;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003963
Chris Wilson3e960502012-11-27 16:22:54 +00003964 was_interruptible = dev_priv->mm.interruptible;
3965 dev_priv->mm.interruptible = false;
3966
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003967 /*
3968 * GPU can automatically power down the render unit if given a page
3969 * to save state.
3970 */
Daniel Vetter6d90c952012-04-26 23:28:05 +02003971 ret = intel_ring_begin(ring, 6);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003972 if (ret) {
3973 ironlake_teardown_rc6(dev);
Chris Wilson3e960502012-11-27 16:22:54 +00003974 dev_priv->mm.interruptible = was_interruptible;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003975 return;
3976 }
3977
Daniel Vetter6d90c952012-04-26 23:28:05 +02003978 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3979 intel_ring_emit(ring, MI_SET_CONTEXT);
Ben Widawskyf343c5f2013-07-05 14:41:04 -07003980 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
Daniel Vetter6d90c952012-04-26 23:28:05 +02003981 MI_MM_SPACE_GTT |
3982 MI_SAVE_EXT_STATE_EN |
3983 MI_RESTORE_EXT_STATE_EN |
3984 MI_RESTORE_INHIBIT);
3985 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3986 intel_ring_emit(ring, MI_NOOP);
3987 intel_ring_emit(ring, MI_FLUSH);
3988 intel_ring_advance(ring);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003989
3990 /*
3991 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3992 * does an implicit flush, combined with MI_FLUSH above, it should be
3993 * safe to assume that renderctx is valid
3994 */
Chris Wilson3e960502012-11-27 16:22:54 +00003995 ret = intel_ring_idle(ring);
3996 dev_priv->mm.interruptible = was_interruptible;
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003997 if (ret) {
Jani Nikuladef27a52013-03-12 10:49:19 +02003998 DRM_ERROR("failed to enable ironlake power savings\n");
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03003999 ironlake_teardown_rc6(dev);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03004000 return;
4001 }
4002
Ben Widawskyf343c5f2013-07-05 14:41:04 -07004003 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03004004 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
Eugeni Dodonov2b4e57b2012-04-18 15:29:23 -03004005}
4006
Eugeni Dodonovdde18882012-04-18 15:29:24 -03004007static unsigned long intel_pxfreq(u32 vidfreq)
4008{
4009 unsigned long freq;
4010 int div = (vidfreq & 0x3f0000) >> 16;
4011 int post = (vidfreq & 0x3000) >> 12;
4012 int pre = (vidfreq & 0x7);
4013
4014 if (!pre)
4015 return 0;
4016
4017 freq = ((div * 133333) / ((1<<post) * pre));
4018
4019 return freq;
4020}
4021
Daniel Vettereb48eb02012-04-26 23:28:12 +02004022static const struct cparams {
4023 u16 i;
4024 u16 t;
4025 u16 m;
4026 u16 c;
4027} cparams[] = {
4028 { 1, 1333, 301, 28664 },
4029 { 1, 1066, 294, 24460 },
4030 { 1, 800, 294, 25192 },
4031 { 0, 1333, 276, 27605 },
4032 { 0, 1066, 276, 27605 },
4033 { 0, 800, 231, 23784 },
4034};
4035
Chris Wilsonf531dcb2012-09-25 10:16:12 +01004036static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02004037{
4038 u64 total_count, diff, ret;
4039 u32 count1, count2, count3, m = 0, c = 0;
4040 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4041 int i;
4042
Daniel Vetter02d71952012-08-09 16:44:54 +02004043 assert_spin_locked(&mchdev_lock);
4044
Daniel Vetter20e4d402012-08-08 23:35:39 +02004045 diff1 = now - dev_priv->ips.last_time1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004046
4047 /* Prevent division-by-zero if we are asking too fast.
4048 * Also, we don't get interesting results if we are polling
4049 * faster than once in 10ms, so just return the saved value
4050 * in such cases.
4051 */
4052 if (diff1 <= 10)
Daniel Vetter20e4d402012-08-08 23:35:39 +02004053 return dev_priv->ips.chipset_power;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004054
4055 count1 = I915_READ(DMIEC);
4056 count2 = I915_READ(DDREC);
4057 count3 = I915_READ(CSIEC);
4058
4059 total_count = count1 + count2 + count3;
4060
4061 /* FIXME: handle per-counter overflow */
Daniel Vetter20e4d402012-08-08 23:35:39 +02004062 if (total_count < dev_priv->ips.last_count1) {
4063 diff = ~0UL - dev_priv->ips.last_count1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004064 diff += total_count;
4065 } else {
Daniel Vetter20e4d402012-08-08 23:35:39 +02004066 diff = total_count - dev_priv->ips.last_count1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004067 }
4068
4069 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
Daniel Vetter20e4d402012-08-08 23:35:39 +02004070 if (cparams[i].i == dev_priv->ips.c_m &&
4071 cparams[i].t == dev_priv->ips.r_t) {
Daniel Vettereb48eb02012-04-26 23:28:12 +02004072 m = cparams[i].m;
4073 c = cparams[i].c;
4074 break;
4075 }
4076 }
4077
4078 diff = div_u64(diff, diff1);
4079 ret = ((m * diff) + c);
4080 ret = div_u64(ret, 10);
4081
Daniel Vetter20e4d402012-08-08 23:35:39 +02004082 dev_priv->ips.last_count1 = total_count;
4083 dev_priv->ips.last_time1 = now;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004084
Daniel Vetter20e4d402012-08-08 23:35:39 +02004085 dev_priv->ips.chipset_power = ret;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004086
4087 return ret;
4088}
4089
Chris Wilsonf531dcb2012-09-25 10:16:12 +01004090unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4091{
4092 unsigned long val;
4093
4094 if (dev_priv->info->gen != 5)
4095 return 0;
4096
4097 spin_lock_irq(&mchdev_lock);
4098
4099 val = __i915_chipset_val(dev_priv);
4100
4101 spin_unlock_irq(&mchdev_lock);
4102
4103 return val;
4104}
4105
Daniel Vettereb48eb02012-04-26 23:28:12 +02004106unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4107{
4108 unsigned long m, x, b;
4109 u32 tsfs;
4110
4111 tsfs = I915_READ(TSFS);
4112
4113 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4114 x = I915_READ8(TR1);
4115
4116 b = tsfs & TSFS_INTR_MASK;
4117
4118 return ((m * x) / 127) - b;
4119}
4120
4121static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4122{
4123 static const struct v_table {
4124 u16 vd; /* in .1 mil */
4125 u16 vm; /* in .1 mil */
4126 } v_table[] = {
4127 { 0, 0, },
4128 { 375, 0, },
4129 { 500, 0, },
4130 { 625, 0, },
4131 { 750, 0, },
4132 { 875, 0, },
4133 { 1000, 0, },
4134 { 1125, 0, },
4135 { 4125, 3000, },
4136 { 4125, 3000, },
4137 { 4125, 3000, },
4138 { 4125, 3000, },
4139 { 4125, 3000, },
4140 { 4125, 3000, },
4141 { 4125, 3000, },
4142 { 4125, 3000, },
4143 { 4125, 3000, },
4144 { 4125, 3000, },
4145 { 4125, 3000, },
4146 { 4125, 3000, },
4147 { 4125, 3000, },
4148 { 4125, 3000, },
4149 { 4125, 3000, },
4150 { 4125, 3000, },
4151 { 4125, 3000, },
4152 { 4125, 3000, },
4153 { 4125, 3000, },
4154 { 4125, 3000, },
4155 { 4125, 3000, },
4156 { 4125, 3000, },
4157 { 4125, 3000, },
4158 { 4125, 3000, },
4159 { 4250, 3125, },
4160 { 4375, 3250, },
4161 { 4500, 3375, },
4162 { 4625, 3500, },
4163 { 4750, 3625, },
4164 { 4875, 3750, },
4165 { 5000, 3875, },
4166 { 5125, 4000, },
4167 { 5250, 4125, },
4168 { 5375, 4250, },
4169 { 5500, 4375, },
4170 { 5625, 4500, },
4171 { 5750, 4625, },
4172 { 5875, 4750, },
4173 { 6000, 4875, },
4174 { 6125, 5000, },
4175 { 6250, 5125, },
4176 { 6375, 5250, },
4177 { 6500, 5375, },
4178 { 6625, 5500, },
4179 { 6750, 5625, },
4180 { 6875, 5750, },
4181 { 7000, 5875, },
4182 { 7125, 6000, },
4183 { 7250, 6125, },
4184 { 7375, 6250, },
4185 { 7500, 6375, },
4186 { 7625, 6500, },
4187 { 7750, 6625, },
4188 { 7875, 6750, },
4189 { 8000, 6875, },
4190 { 8125, 7000, },
4191 { 8250, 7125, },
4192 { 8375, 7250, },
4193 { 8500, 7375, },
4194 { 8625, 7500, },
4195 { 8750, 7625, },
4196 { 8875, 7750, },
4197 { 9000, 7875, },
4198 { 9125, 8000, },
4199 { 9250, 8125, },
4200 { 9375, 8250, },
4201 { 9500, 8375, },
4202 { 9625, 8500, },
4203 { 9750, 8625, },
4204 { 9875, 8750, },
4205 { 10000, 8875, },
4206 { 10125, 9000, },
4207 { 10250, 9125, },
4208 { 10375, 9250, },
4209 { 10500, 9375, },
4210 { 10625, 9500, },
4211 { 10750, 9625, },
4212 { 10875, 9750, },
4213 { 11000, 9875, },
4214 { 11125, 10000, },
4215 { 11250, 10125, },
4216 { 11375, 10250, },
4217 { 11500, 10375, },
4218 { 11625, 10500, },
4219 { 11750, 10625, },
4220 { 11875, 10750, },
4221 { 12000, 10875, },
4222 { 12125, 11000, },
4223 { 12250, 11125, },
4224 { 12375, 11250, },
4225 { 12500, 11375, },
4226 { 12625, 11500, },
4227 { 12750, 11625, },
4228 { 12875, 11750, },
4229 { 13000, 11875, },
4230 { 13125, 12000, },
4231 { 13250, 12125, },
4232 { 13375, 12250, },
4233 { 13500, 12375, },
4234 { 13625, 12500, },
4235 { 13750, 12625, },
4236 { 13875, 12750, },
4237 { 14000, 12875, },
4238 { 14125, 13000, },
4239 { 14250, 13125, },
4240 { 14375, 13250, },
4241 { 14500, 13375, },
4242 { 14625, 13500, },
4243 { 14750, 13625, },
4244 { 14875, 13750, },
4245 { 15000, 13875, },
4246 { 15125, 14000, },
4247 { 15250, 14125, },
4248 { 15375, 14250, },
4249 { 15500, 14375, },
4250 { 15625, 14500, },
4251 { 15750, 14625, },
4252 { 15875, 14750, },
4253 { 16000, 14875, },
4254 { 16125, 15000, },
4255 };
4256 if (dev_priv->info->is_mobile)
4257 return v_table[pxvid].vm;
4258 else
4259 return v_table[pxvid].vd;
4260}
4261
Daniel Vetter02d71952012-08-09 16:44:54 +02004262static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02004263{
4264 struct timespec now, diff1;
4265 u64 diff;
4266 unsigned long diffms;
4267 u32 count;
4268
Daniel Vetter02d71952012-08-09 16:44:54 +02004269 assert_spin_locked(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004270
4271 getrawmonotonic(&now);
Daniel Vetter20e4d402012-08-08 23:35:39 +02004272 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004273
4274 /* Don't divide by 0 */
4275 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4276 if (!diffms)
4277 return;
4278
4279 count = I915_READ(GFXEC);
4280
Daniel Vetter20e4d402012-08-08 23:35:39 +02004281 if (count < dev_priv->ips.last_count2) {
4282 diff = ~0UL - dev_priv->ips.last_count2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004283 diff += count;
4284 } else {
Daniel Vetter20e4d402012-08-08 23:35:39 +02004285 diff = count - dev_priv->ips.last_count2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004286 }
4287
Daniel Vetter20e4d402012-08-08 23:35:39 +02004288 dev_priv->ips.last_count2 = count;
4289 dev_priv->ips.last_time2 = now;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004290
4291 /* More magic constants... */
4292 diff = diff * 1181;
4293 diff = div_u64(diff, diffms * 10);
Daniel Vetter20e4d402012-08-08 23:35:39 +02004294 dev_priv->ips.gfx_power = diff;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004295}
4296
Daniel Vetter02d71952012-08-09 16:44:54 +02004297void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4298{
4299 if (dev_priv->info->gen != 5)
4300 return;
4301
Daniel Vetter92703882012-08-09 16:46:01 +02004302 spin_lock_irq(&mchdev_lock);
Daniel Vetter02d71952012-08-09 16:44:54 +02004303
4304 __i915_update_gfx_val(dev_priv);
4305
Daniel Vetter92703882012-08-09 16:46:01 +02004306 spin_unlock_irq(&mchdev_lock);
Daniel Vetter02d71952012-08-09 16:44:54 +02004307}
4308
Chris Wilsonf531dcb2012-09-25 10:16:12 +01004309static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02004310{
4311 unsigned long t, corr, state1, corr2, state2;
4312 u32 pxvid, ext_v;
4313
Daniel Vetter02d71952012-08-09 16:44:54 +02004314 assert_spin_locked(&mchdev_lock);
4315
Daniel Vetterc6a828d2012-08-08 23:35:35 +02004316 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
Daniel Vettereb48eb02012-04-26 23:28:12 +02004317 pxvid = (pxvid >> 24) & 0x7f;
4318 ext_v = pvid_to_extvid(dev_priv, pxvid);
4319
4320 state1 = ext_v;
4321
4322 t = i915_mch_val(dev_priv);
4323
4324 /* Revel in the empirically derived constants */
4325
4326 /* Correction factor in 1/100000 units */
4327 if (t > 80)
4328 corr = ((t * 2349) + 135940);
4329 else if (t >= 50)
4330 corr = ((t * 964) + 29317);
4331 else /* < 50 */
4332 corr = ((t * 301) + 1004);
4333
4334 corr = corr * ((150142 * state1) / 10000 - 78642);
4335 corr /= 100000;
Daniel Vetter20e4d402012-08-08 23:35:39 +02004336 corr2 = (corr * dev_priv->ips.corr);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004337
4338 state2 = (corr2 * state1) / 10000;
4339 state2 /= 100; /* convert to mW */
4340
Daniel Vetter02d71952012-08-09 16:44:54 +02004341 __i915_update_gfx_val(dev_priv);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004342
Daniel Vetter20e4d402012-08-08 23:35:39 +02004343 return dev_priv->ips.gfx_power + state2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004344}
4345
Chris Wilsonf531dcb2012-09-25 10:16:12 +01004346unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4347{
4348 unsigned long val;
4349
4350 if (dev_priv->info->gen != 5)
4351 return 0;
4352
4353 spin_lock_irq(&mchdev_lock);
4354
4355 val = __i915_gfx_val(dev_priv);
4356
4357 spin_unlock_irq(&mchdev_lock);
4358
4359 return val;
4360}
4361
Daniel Vettereb48eb02012-04-26 23:28:12 +02004362/**
4363 * i915_read_mch_val - return value for IPS use
4364 *
4365 * Calculate and return a value for the IPS driver to use when deciding whether
4366 * we have thermal and power headroom to increase CPU or GPU power budget.
4367 */
4368unsigned long i915_read_mch_val(void)
4369{
4370 struct drm_i915_private *dev_priv;
4371 unsigned long chipset_val, graphics_val, ret = 0;
4372
Daniel Vetter92703882012-08-09 16:46:01 +02004373 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004374 if (!i915_mch_dev)
4375 goto out_unlock;
4376 dev_priv = i915_mch_dev;
4377
Chris Wilsonf531dcb2012-09-25 10:16:12 +01004378 chipset_val = __i915_chipset_val(dev_priv);
4379 graphics_val = __i915_gfx_val(dev_priv);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004380
4381 ret = chipset_val + graphics_val;
4382
4383out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02004384 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004385
4386 return ret;
4387}
4388EXPORT_SYMBOL_GPL(i915_read_mch_val);
4389
4390/**
4391 * i915_gpu_raise - raise GPU frequency limit
4392 *
4393 * Raise the limit; IPS indicates we have thermal headroom.
4394 */
4395bool i915_gpu_raise(void)
4396{
4397 struct drm_i915_private *dev_priv;
4398 bool ret = true;
4399
Daniel Vetter92703882012-08-09 16:46:01 +02004400 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004401 if (!i915_mch_dev) {
4402 ret = false;
4403 goto out_unlock;
4404 }
4405 dev_priv = i915_mch_dev;
4406
Daniel Vetter20e4d402012-08-08 23:35:39 +02004407 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4408 dev_priv->ips.max_delay--;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004409
4410out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02004411 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004412
4413 return ret;
4414}
4415EXPORT_SYMBOL_GPL(i915_gpu_raise);
4416
4417/**
4418 * i915_gpu_lower - lower GPU frequency limit
4419 *
4420 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4421 * frequency maximum.
4422 */
4423bool i915_gpu_lower(void)
4424{
4425 struct drm_i915_private *dev_priv;
4426 bool ret = true;
4427
Daniel Vetter92703882012-08-09 16:46:01 +02004428 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004429 if (!i915_mch_dev) {
4430 ret = false;
4431 goto out_unlock;
4432 }
4433 dev_priv = i915_mch_dev;
4434
Daniel Vetter20e4d402012-08-08 23:35:39 +02004435 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4436 dev_priv->ips.max_delay++;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004437
4438out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02004439 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004440
4441 return ret;
4442}
4443EXPORT_SYMBOL_GPL(i915_gpu_lower);
4444
4445/**
4446 * i915_gpu_busy - indicate GPU business to IPS
4447 *
4448 * Tell the IPS driver whether or not the GPU is busy.
4449 */
4450bool i915_gpu_busy(void)
4451{
4452 struct drm_i915_private *dev_priv;
Chris Wilsonf047e392012-07-21 12:31:41 +01004453 struct intel_ring_buffer *ring;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004454 bool ret = false;
Chris Wilsonf047e392012-07-21 12:31:41 +01004455 int i;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004456
Daniel Vetter92703882012-08-09 16:46:01 +02004457 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004458 if (!i915_mch_dev)
4459 goto out_unlock;
4460 dev_priv = i915_mch_dev;
4461
Chris Wilsonf047e392012-07-21 12:31:41 +01004462 for_each_ring(ring, dev_priv, i)
4463 ret |= !list_empty(&ring->request_list);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004464
4465out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02004466 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004467
4468 return ret;
4469}
4470EXPORT_SYMBOL_GPL(i915_gpu_busy);
4471
4472/**
4473 * i915_gpu_turbo_disable - disable graphics turbo
4474 *
4475 * Disable graphics turbo by resetting the max frequency and setting the
4476 * current frequency to the default.
4477 */
4478bool i915_gpu_turbo_disable(void)
4479{
4480 struct drm_i915_private *dev_priv;
4481 bool ret = true;
4482
Daniel Vetter92703882012-08-09 16:46:01 +02004483 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004484 if (!i915_mch_dev) {
4485 ret = false;
4486 goto out_unlock;
4487 }
4488 dev_priv = i915_mch_dev;
4489
Daniel Vetter20e4d402012-08-08 23:35:39 +02004490 dev_priv->ips.max_delay = dev_priv->ips.fstart;
Daniel Vettereb48eb02012-04-26 23:28:12 +02004491
Daniel Vetter20e4d402012-08-08 23:35:39 +02004492 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
Daniel Vettereb48eb02012-04-26 23:28:12 +02004493 ret = false;
4494
4495out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02004496 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004497
4498 return ret;
4499}
4500EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4501
4502/**
4503 * Tells the intel_ips driver that the i915 driver is now loaded, if
4504 * IPS got loaded first.
4505 *
4506 * This awkward dance is so that neither module has to depend on the
4507 * other in order for IPS to do the appropriate communication of
4508 * GPU turbo limits to i915.
4509 */
4510static void
4511ips_ping_for_i915_load(void)
4512{
4513 void (*link)(void);
4514
4515 link = symbol_get(ips_link_to_i915_driver);
4516 if (link) {
4517 link();
4518 symbol_put(ips_link_to_i915_driver);
4519 }
4520}
4521
4522void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4523{
Daniel Vetter02d71952012-08-09 16:44:54 +02004524 /* We only register the i915 ips part with intel-ips once everything is
4525 * set up, to avoid intel-ips sneaking in and reading bogus values. */
Daniel Vetter92703882012-08-09 16:46:01 +02004526 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004527 i915_mch_dev = dev_priv;
Daniel Vetter92703882012-08-09 16:46:01 +02004528 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004529
4530 ips_ping_for_i915_load();
4531}
4532
4533void intel_gpu_ips_teardown(void)
4534{
Daniel Vetter92703882012-08-09 16:46:01 +02004535 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004536 i915_mch_dev = NULL;
Daniel Vetter92703882012-08-09 16:46:01 +02004537 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02004538}
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004539static void intel_init_emon(struct drm_device *dev)
Eugeni Dodonovdde18882012-04-18 15:29:24 -03004540{
4541 struct drm_i915_private *dev_priv = dev->dev_private;
4542 u32 lcfuse;
4543 u8 pxw[16];
4544 int i;
4545
4546 /* Disable to program */
4547 I915_WRITE(ECR, 0);
4548 POSTING_READ(ECR);
4549
4550 /* Program energy weights for various events */
4551 I915_WRITE(SDEW, 0x15040d00);
4552 I915_WRITE(CSIEW0, 0x007f0000);
4553 I915_WRITE(CSIEW1, 0x1e220004);
4554 I915_WRITE(CSIEW2, 0x04000004);
4555
4556 for (i = 0; i < 5; i++)
4557 I915_WRITE(PEW + (i * 4), 0);
4558 for (i = 0; i < 3; i++)
4559 I915_WRITE(DEW + (i * 4), 0);
4560
4561 /* Program P-state weights to account for frequency power adjustment */
4562 for (i = 0; i < 16; i++) {
4563 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4564 unsigned long freq = intel_pxfreq(pxvidfreq);
4565 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4566 PXVFREQ_PX_SHIFT;
4567 unsigned long val;
4568
4569 val = vid * vid;
4570 val *= (freq / 1000);
4571 val *= 255;
4572 val /= (127*127*900);
4573 if (val > 0xff)
4574 DRM_ERROR("bad pxval: %ld\n", val);
4575 pxw[i] = val;
4576 }
4577 /* Render standby states get 0 weight */
4578 pxw[14] = 0;
4579 pxw[15] = 0;
4580
4581 for (i = 0; i < 4; i++) {
4582 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4583 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4584 I915_WRITE(PXW + (i * 4), val);
4585 }
4586
4587 /* Adjust magic regs to magic values (more experimental results) */
4588 I915_WRITE(OGW0, 0);
4589 I915_WRITE(OGW1, 0);
4590 I915_WRITE(EG0, 0x00007f00);
4591 I915_WRITE(EG1, 0x0000000e);
4592 I915_WRITE(EG2, 0x000e0000);
4593 I915_WRITE(EG3, 0x68000300);
4594 I915_WRITE(EG4, 0x42000000);
4595 I915_WRITE(EG5, 0x00140031);
4596 I915_WRITE(EG6, 0);
4597 I915_WRITE(EG7, 0);
4598
4599 for (i = 0; i < 8; i++)
4600 I915_WRITE(PXWL + (i * 4), 0);
4601
4602 /* Enable PMON + select events */
4603 I915_WRITE(ECR, 0x80000019);
4604
4605 lcfuse = I915_READ(LCFUSE02);
4606
Daniel Vetter20e4d402012-08-08 23:35:39 +02004607 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
Eugeni Dodonovdde18882012-04-18 15:29:24 -03004608}
4609
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004610void intel_disable_gt_powersave(struct drm_device *dev)
4611{
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004612 struct drm_i915_private *dev_priv = dev->dev_private;
4613
Daniel Vetterfd0c0642013-04-24 11:13:35 +02004614 /* Interrupts should be disabled already to avoid re-arming. */
4615 WARN_ON(dev->irq_enabled);
4616
Daniel Vetter930ebb42012-06-29 23:32:16 +02004617 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004618 ironlake_disable_drps(dev);
Daniel Vetter930ebb42012-06-29 23:32:16 +02004619 ironlake_disable_rc6(dev);
Jesse Barnes0a073b82013-04-17 15:54:58 -07004620 } else if (INTEL_INFO(dev)->gen >= 6) {
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004621 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
Jesse Barnes250848c2013-04-23 10:09:27 -07004622 cancel_work_sync(&dev_priv->rps.work);
Jesse Barnes52ceb902013-04-23 10:09:26 -07004623 if (IS_VALLEYVIEW(dev))
4624 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004625 mutex_lock(&dev_priv->rps.hw_lock);
Jesse Barnesd20d4f02013-04-23 10:09:28 -07004626 if (IS_VALLEYVIEW(dev))
4627 valleyview_disable_rps(dev);
4628 else
4629 gen6_disable_rps(dev);
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004630 mutex_unlock(&dev_priv->rps.hw_lock);
Daniel Vetter930ebb42012-06-29 23:32:16 +02004631 }
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004632}
4633
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004634static void intel_gen6_powersave_work(struct work_struct *work)
4635{
4636 struct drm_i915_private *dev_priv =
4637 container_of(work, struct drm_i915_private,
4638 rps.delayed_resume_work.work);
4639 struct drm_device *dev = dev_priv->dev;
4640
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004641 mutex_lock(&dev_priv->rps.hw_lock);
Jesse Barnes0a073b82013-04-17 15:54:58 -07004642
4643 if (IS_VALLEYVIEW(dev)) {
4644 valleyview_enable_rps(dev);
4645 } else {
4646 gen6_enable_rps(dev);
4647 gen6_update_ring_freq(dev);
4648 }
Jesse Barnes4fc688c2012-11-02 11:14:01 -07004649 mutex_unlock(&dev_priv->rps.hw_lock);
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004650}
4651
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004652void intel_enable_gt_powersave(struct drm_device *dev)
4653{
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004654 struct drm_i915_private *dev_priv = dev->dev_private;
4655
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004656 if (IS_IRONLAKE_M(dev)) {
4657 ironlake_enable_drps(dev);
4658 ironlake_enable_rc6(dev);
4659 intel_init_emon(dev);
Jesse Barnes0a073b82013-04-17 15:54:58 -07004660 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
Jesse Barnes1a01ab32012-11-02 11:14:00 -07004661 /*
4662 * PCU communication is slow and this doesn't need to be
4663 * done at any specific time, so do this out of our fast path
4664 * to make resume and init faster.
4665 */
4666 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4667 round_jiffies_up_relative(HZ));
Daniel Vetter8090c6b2012-06-24 16:42:32 +02004668 }
4669}
4670
Daniel Vetter3107bd42012-10-31 22:52:31 +01004671static void ibx_init_clock_gating(struct drm_device *dev)
4672{
4673 struct drm_i915_private *dev_priv = dev->dev_private;
4674
4675 /*
4676 * On Ibex Peak and Cougar Point, we need to disable clock
4677 * gating for the panel power sequencer or it will fail to
4678 * start up when no ports are active.
4679 */
4680 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4681}
4682
Ville Syrjälä0e088b82013-06-07 10:47:04 +03004683static void g4x_disable_trickle_feed(struct drm_device *dev)
4684{
4685 struct drm_i915_private *dev_priv = dev->dev_private;
4686 int pipe;
4687
4688 for_each_pipe(pipe) {
4689 I915_WRITE(DSPCNTR(pipe),
4690 I915_READ(DSPCNTR(pipe)) |
4691 DISPPLANE_TRICKLE_FEED_DISABLE);
4692 intel_flush_display_plane(dev_priv, pipe);
4693 }
4694}
4695
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03004696static void ironlake_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004697{
4698 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau231e54f2012-10-19 17:55:41 +01004699 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004700
Damien Lespiauf1e8fa52013-06-07 17:41:09 +01004701 /*
4702 * Required for FBC
4703 * WaFbcDisableDpfcClockGating:ilk
4704 */
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01004705 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4706 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4707 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004708
4709 I915_WRITE(PCH_3DCGDIS0,
4710 MARIUNIT_CLOCK_GATE_DISABLE |
4711 SVSMUNIT_CLOCK_GATE_DISABLE);
4712 I915_WRITE(PCH_3DCGDIS1,
4713 VFMUNIT_CLOCK_GATE_DISABLE);
4714
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004715 /*
4716 * According to the spec the following bits should be set in
4717 * order to enable memory self-refresh
4718 * The bit 22/21 of 0x42004
4719 * The bit 5 of 0x42020
4720 * The bit 15 of 0x45000
4721 */
4722 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4723 (I915_READ(ILK_DISPLAY_CHICKEN2) |
4724 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01004725 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004726 I915_WRITE(DISP_ARB_CTL,
4727 (I915_READ(DISP_ARB_CTL) |
4728 DISP_FBC_WM_DIS));
4729 I915_WRITE(WM3_LP_ILK, 0);
4730 I915_WRITE(WM2_LP_ILK, 0);
4731 I915_WRITE(WM1_LP_ILK, 0);
4732
4733 /*
4734 * Based on the document from hardware guys the following bits
4735 * should be set unconditionally in order to enable FBC.
4736 * The bit 22 of 0x42000
4737 * The bit 22 of 0x42004
4738 * The bit 7,8,9 of 0x42020.
4739 */
4740 if (IS_IRONLAKE_M(dev)) {
Damien Lespiau4bb35332013-06-14 15:23:24 +01004741 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004742 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4743 I915_READ(ILK_DISPLAY_CHICKEN1) |
4744 ILK_FBCQ_DIS);
4745 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4746 I915_READ(ILK_DISPLAY_CHICKEN2) |
4747 ILK_DPARB_GATE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004748 }
4749
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01004750 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4751
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004752 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4753 I915_READ(ILK_DISPLAY_CHICKEN2) |
4754 ILK_ELPIN_409_SELECT);
4755 I915_WRITE(_3D_CHICKEN2,
4756 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4757 _3D_CHICKEN2_WM_READ_PIPELINED);
Daniel Vetter4358a372012-10-18 11:49:51 +02004758
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004759 /* WaDisableRenderCachePipelinedFlush:ilk */
Daniel Vetter4358a372012-10-18 11:49:51 +02004760 I915_WRITE(CACHE_MODE_0,
4761 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
Daniel Vetter3107bd42012-10-31 22:52:31 +01004762
Ville Syrjälä0e088b82013-06-07 10:47:04 +03004763 g4x_disable_trickle_feed(dev);
Ville Syrjäläbdad2b22013-06-07 10:47:03 +03004764
Daniel Vetter3107bd42012-10-31 22:52:31 +01004765 ibx_init_clock_gating(dev);
4766}
4767
4768static void cpt_init_clock_gating(struct drm_device *dev)
4769{
4770 struct drm_i915_private *dev_priv = dev->dev_private;
4771 int pipe;
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03004772 uint32_t val;
Daniel Vetter3107bd42012-10-31 22:52:31 +01004773
4774 /*
4775 * On Ibex Peak and Cougar Point, we need to disable clock
4776 * gating for the panel power sequencer or it will fail to
4777 * start up when no ports are active.
4778 */
4779 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4780 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4781 DPLS_EDP_PPS_FIX_DIS);
Takashi Iwai335c07b2012-12-11 11:46:29 +01004782 /* The below fixes the weird display corruption, a few pixels shifted
4783 * downward, on (only) LVDS of some HP laptops with IVY.
4784 */
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03004785 for_each_pipe(pipe) {
Paulo Zanonidc4bd2d2013-04-08 15:48:08 -03004786 val = I915_READ(TRANS_CHICKEN2(pipe));
4787 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4788 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03004789 if (dev_priv->vbt.fdi_rx_polarity_inverted)
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03004790 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
Paulo Zanonidc4bd2d2013-04-08 15:48:08 -03004791 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4792 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4793 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03004794 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4795 }
Daniel Vetter3107bd42012-10-31 22:52:31 +01004796 /* WADP0ClockGatingDisable */
4797 for_each_pipe(pipe) {
4798 I915_WRITE(TRANS_CHICKEN1(pipe),
4799 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4800 }
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004801}
4802
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01004803static void gen6_check_mch_setup(struct drm_device *dev)
4804{
4805 struct drm_i915_private *dev_priv = dev->dev_private;
4806 uint32_t tmp;
4807
4808 tmp = I915_READ(MCH_SSKPD);
4809 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4810 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4811 DRM_INFO("This can cause pipe underruns and display issues.\n");
4812 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4813 }
4814}
4815
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03004816static void gen6_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004817{
4818 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau231e54f2012-10-19 17:55:41 +01004819 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004820
Damien Lespiau231e54f2012-10-19 17:55:41 +01004821 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004822
4823 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4824 I915_READ(ILK_DISPLAY_CHICKEN2) |
4825 ILK_ELPIN_409_SELECT);
4826
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004827 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
Daniel Vetter42839082012-12-14 23:38:28 +01004828 I915_WRITE(_3D_CHICKEN,
4829 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4830
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004831 /* WaSetupGtModeTdRowDispatch:snb */
Daniel Vetter6547fbd2012-12-14 23:38:29 +01004832 if (IS_SNB_GT1(dev))
4833 I915_WRITE(GEN6_GT_MODE,
4834 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4835
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004836 I915_WRITE(WM3_LP_ILK, 0);
4837 I915_WRITE(WM2_LP_ILK, 0);
4838 I915_WRITE(WM1_LP_ILK, 0);
4839
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004840 I915_WRITE(CACHE_MODE_0,
Daniel Vetter50743292012-04-26 22:02:54 +02004841 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004842
4843 I915_WRITE(GEN6_UCGCTL1,
4844 I915_READ(GEN6_UCGCTL1) |
4845 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4846 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4847
4848 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4849 * gating disable must be set. Failure to set it results in
4850 * flickering pixels due to Z write ordering failures after
4851 * some amount of runtime in the Mesa "fire" demo, and Unigine
4852 * Sanctuary and Tropics, and apparently anything else with
4853 * alpha test or pixel discard.
4854 *
4855 * According to the spec, bit 11 (RCCUNIT) must also be set,
4856 * but we didn't debug actual testcases to find it out.
Jesse Barnes0f846f82012-06-14 11:04:47 -07004857 *
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004858 * Also apply WaDisableVDSUnitClockGating:snb and
4859 * WaDisableRCPBUnitClockGating:snb.
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004860 */
4861 I915_WRITE(GEN6_UCGCTL2,
Jesse Barnes0f846f82012-06-14 11:04:47 -07004862 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004863 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4864 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4865
4866 /* Bspec says we need to always set all mask bits. */
Kenneth Graunke26b6e442012-10-07 08:51:07 -07004867 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
4868 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004869
4870 /*
4871 * According to the spec the following bits should be
4872 * set in order to enable memory self-refresh and fbc:
4873 * The bit21 and bit22 of 0x42000
4874 * The bit21 and bit22 of 0x42004
4875 * The bit5 and bit7 of 0x42020
4876 * The bit14 of 0x70180
4877 * The bit14 of 0x71180
Damien Lespiau4bb35332013-06-14 15:23:24 +01004878 *
4879 * WaFbcAsynchFlipDisableFbcQueue:snb
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004880 */
4881 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4882 I915_READ(ILK_DISPLAY_CHICKEN1) |
4883 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4884 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4885 I915_READ(ILK_DISPLAY_CHICKEN2) |
4886 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
Damien Lespiau231e54f2012-10-19 17:55:41 +01004887 I915_WRITE(ILK_DSPCLK_GATE_D,
4888 I915_READ(ILK_DSPCLK_GATE_D) |
4889 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4890 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004891
Ville Syrjälä0e088b82013-06-07 10:47:04 +03004892 g4x_disable_trickle_feed(dev);
Ben Widawskyf8f2ac92012-10-03 19:34:24 -07004893
4894 /* The default value should be 0x200 according to docs, but the two
4895 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
4896 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
4897 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
Daniel Vetter3107bd42012-10-31 22:52:31 +01004898
4899 cpt_init_clock_gating(dev);
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01004900
4901 gen6_check_mch_setup(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004902}
4903
4904static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4905{
4906 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4907
4908 reg &= ~GEN7_FF_SCHED_MASK;
4909 reg |= GEN7_FF_TS_SCHED_HW;
4910 reg |= GEN7_FF_VS_SCHED_HW;
4911 reg |= GEN7_FF_DS_SCHED_HW;
4912
Ben Widawsky41c0b3a2013-01-26 11:52:00 -08004913 if (IS_HASWELL(dev_priv->dev))
4914 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4915
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004916 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4917}
4918
Paulo Zanoni17a303e2012-11-20 15:12:07 -02004919static void lpt_init_clock_gating(struct drm_device *dev)
4920{
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922
4923 /*
4924 * TODO: this bit should only be enabled when really needed, then
4925 * disabled when not needed anymore in order to save power.
4926 */
4927 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4928 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4929 I915_READ(SOUTH_DSPCLK_GATE_D) |
4930 PCH_LP_PARTITION_LEVEL_DISABLE);
Paulo Zanoni0a790cd2013-04-17 18:15:49 -03004931
4932 /* WADPOClockGatingDisable:hsw */
4933 I915_WRITE(_TRANSA_CHICKEN1,
4934 I915_READ(_TRANSA_CHICKEN1) |
4935 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
Paulo Zanoni17a303e2012-11-20 15:12:07 -02004936}
4937
Imre Deak7d708ee2013-04-17 14:04:50 +03004938static void lpt_suspend_hw(struct drm_device *dev)
4939{
4940 struct drm_i915_private *dev_priv = dev->dev_private;
4941
4942 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4943 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4944
4945 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4946 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4947 }
4948}
4949
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004950static void haswell_init_clock_gating(struct drm_device *dev)
4951{
4952 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004953
4954 I915_WRITE(WM3_LP_ILK, 0);
4955 I915_WRITE(WM2_LP_ILK, 0);
4956 I915_WRITE(WM1_LP_ILK, 0);
4957
4958 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004959 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004960 */
4961 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4962
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004963 /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004964 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4965 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4966
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004967 /* WaApplyL3ControlAndL3ChickenMode:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004968 I915_WRITE(GEN7_L3CNTLREG1,
4969 GEN7_WA_FOR_GEN7_L3_CONTROL);
4970 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4971 GEN7_WA_L3_CHICKEN_MODE);
4972
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004973 /* This is required by WaCatErrorRejectionIssue:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004974 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4975 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4976 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4977
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004978 /* WaVSRefCountFullforceMissDisable:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004979 gen7_setup_fixed_func_scheduler(dev_priv);
4980
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004981 /* WaDisable4x2SubspanOptimization:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004982 I915_WRITE(CACHE_MODE_1,
4983 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Eugeni Dodonov1544d9d2012-07-02 11:51:10 -03004984
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01004985 /* WaSwitchSolVfFArbitrationPriority:hsw */
Ben Widawskye3dff582013-03-20 14:49:14 -07004986 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4987
Paulo Zanoni90a88642013-05-03 17:23:45 -03004988 /* WaRsPkgCStateDisplayPMReq:hsw */
4989 I915_WRITE(CHICKEN_PAR1_1,
4990 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
Eugeni Dodonov1544d9d2012-07-02 11:51:10 -03004991
Paulo Zanoni17a303e2012-11-20 15:12:07 -02004992 lpt_init_clock_gating(dev);
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03004993}
4994
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03004995static void ivybridge_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004996{
4997 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky20848222012-05-04 18:58:59 -07004998 uint32_t snpcr;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03004999
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005000 I915_WRITE(WM3_LP_ILK, 0);
5001 I915_WRITE(WM2_LP_ILK, 0);
5002 I915_WRITE(WM1_LP_ILK, 0);
5003
Damien Lespiau231e54f2012-10-19 17:55:41 +01005004 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005005
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005006 /* WaDisableEarlyCull:ivb */
Jesse Barnes87f80202012-10-02 17:43:41 -05005007 I915_WRITE(_3D_CHICKEN3,
5008 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5009
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005010 /* WaDisableBackToBackFlipFix:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005011 I915_WRITE(IVB_CHICKEN3,
5012 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5013 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5014
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005015 /* WaDisablePSDDualDispatchEnable:ivb */
Jesse Barnes12f33822012-10-25 12:15:45 -07005016 if (IS_IVB_GT1(dev))
5017 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5018 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5019 else
5020 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
5021 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5022
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005023 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005024 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5025 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5026
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005027 /* WaApplyL3ControlAndL3ChickenMode:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005028 I915_WRITE(GEN7_L3CNTLREG1,
5029 GEN7_WA_FOR_GEN7_L3_CONTROL);
5030 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
Jesse Barnes8ab43972012-10-25 12:15:42 -07005031 GEN7_WA_L3_CHICKEN_MODE);
5032 if (IS_IVB_GT1(dev))
5033 I915_WRITE(GEN7_ROW_CHICKEN2,
5034 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5035 else
5036 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5037 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5038
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005039
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005040 /* WaForceL3Serialization:ivb */
Jesse Barnes61939d92012-10-02 17:43:38 -05005041 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5042 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5043
Jesse Barnes0f846f82012-06-14 11:04:47 -07005044 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5045 * gating disable must be set. Failure to set it results in
5046 * flickering pixels due to Z write ordering failures after
5047 * some amount of runtime in the Mesa "fire" demo, and Unigine
5048 * Sanctuary and Tropics, and apparently anything else with
5049 * alpha test or pixel discard.
5050 *
5051 * According to the spec, bit 11 (RCCUNIT) must also be set,
5052 * but we didn't debug actual testcases to find it out.
5053 *
5054 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005055 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
Jesse Barnes0f846f82012-06-14 11:04:47 -07005056 */
5057 I915_WRITE(GEN6_UCGCTL2,
5058 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5059 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5060
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005061 /* This is required by WaCatErrorRejectionIssue:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005062 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5063 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5064 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5065
Ville Syrjälä0e088b82013-06-07 10:47:04 +03005066 g4x_disable_trickle_feed(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005067
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005068 /* WaVSRefCountFullforceMissDisable:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005069 gen7_setup_fixed_func_scheduler(dev_priv);
Daniel Vetter97e19302012-04-24 16:00:21 +02005070
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005071 /* WaDisable4x2SubspanOptimization:ivb */
Daniel Vetter97e19302012-04-24 16:00:21 +02005072 I915_WRITE(CACHE_MODE_1,
5073 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Ben Widawsky20848222012-05-04 18:58:59 -07005074
5075 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5076 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5077 snpcr |= GEN6_MBC_SNPCR_MED;
5078 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
Daniel Vetter3107bd42012-10-31 22:52:31 +01005079
Ben Widawskyab5c6082013-04-05 13:12:41 -07005080 if (!HAS_PCH_NOP(dev))
5081 cpt_init_clock_gating(dev);
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01005082
5083 gen6_check_mch_setup(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005084}
5085
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005086static void valleyview_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005087{
5088 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005089
Ville Syrjäläd7fe0cc2013-05-21 18:01:50 +03005090 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005091
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005092 /* WaDisableEarlyCull:vlv */
Jesse Barnes87f80202012-10-02 17:43:41 -05005093 I915_WRITE(_3D_CHICKEN3,
5094 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5095
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005096 /* WaDisableBackToBackFlipFix:vlv */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005097 I915_WRITE(IVB_CHICKEN3,
5098 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5099 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5100
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005101 /* WaDisablePSDDualDispatchEnable:vlv */
Jesse Barnes12f33822012-10-25 12:15:45 -07005102 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
Jesse Barnesd3bc0302013-03-08 10:45:51 -08005103 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5104 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
Jesse Barnes12f33822012-10-25 12:15:45 -07005105
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005106 /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005107 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5108 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5109
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005110 /* WaApplyL3ControlAndL3ChickenMode:vlv */
Jesse Barnesd0cf5ea2012-10-25 12:15:41 -07005111 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005112 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
5113
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005114 /* WaForceL3Serialization:vlv */
Jesse Barnes61939d92012-10-02 17:43:38 -05005115 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5116 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5117
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005118 /* WaDisableDopClockGating:vlv */
Jesse Barnes8ab43972012-10-25 12:15:42 -07005119 I915_WRITE(GEN7_ROW_CHICKEN2,
5120 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5121
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005122 /* This is required by WaCatErrorRejectionIssue:vlv */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005123 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5124 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5125 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5126
Jesse Barnes0f846f82012-06-14 11:04:47 -07005127 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5128 * gating disable must be set. Failure to set it results in
5129 * flickering pixels due to Z write ordering failures after
5130 * some amount of runtime in the Mesa "fire" demo, and Unigine
5131 * Sanctuary and Tropics, and apparently anything else with
5132 * alpha test or pixel discard.
5133 *
5134 * According to the spec, bit 11 (RCCUNIT) must also be set,
5135 * but we didn't debug actual testcases to find it out.
5136 *
5137 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005138 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
Jesse Barnes0f846f82012-06-14 11:04:47 -07005139 *
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005140 * Also apply WaDisableVDSUnitClockGating:vlv and
5141 * WaDisableRCPBUnitClockGating:vlv.
Jesse Barnes0f846f82012-06-14 11:04:47 -07005142 */
5143 I915_WRITE(GEN6_UCGCTL2,
5144 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
Jesse Barnes6edaa7f2012-06-14 11:04:49 -07005145 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
Jesse Barnes0f846f82012-06-14 11:04:47 -07005146 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
5147 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5148 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5149
Jesse Barnese3f33d42012-06-14 11:04:50 -07005150 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5151
Ville Syrjäläe0d8d592013-06-12 22:11:18 +03005152 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005153
Daniel Vetter6b26c862012-04-24 14:04:12 +02005154 I915_WRITE(CACHE_MODE_1,
5155 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Jesse Barnes79831172012-06-20 10:53:12 -07005156
5157 /*
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01005158 * WaDisableVLVClockGating_VBIIssue:vlv
Jesse Barnes2d809572012-10-25 12:15:44 -07005159 * Disable clock gating on th GCFG unit to prevent a delay
5160 * in the reporting of vblank events.
5161 */
Jesse Barnes4e8c84a2013-03-08 10:45:54 -08005162 I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
5163
5164 /* Conservative clock gating settings for now */
5165 I915_WRITE(0x9400, 0xffffffff);
5166 I915_WRITE(0x9404, 0xffffffff);
5167 I915_WRITE(0x9408, 0xffffffff);
5168 I915_WRITE(0x940c, 0xffffffff);
5169 I915_WRITE(0x9410, 0xffffffff);
5170 I915_WRITE(0x9414, 0xffffffff);
5171 I915_WRITE(0x9418, 0xffffffff);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005172}
5173
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005174static void g4x_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005175{
5176 struct drm_i915_private *dev_priv = dev->dev_private;
5177 uint32_t dspclk_gate;
5178
5179 I915_WRITE(RENCLK_GATE_D1, 0);
5180 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5181 GS_UNIT_CLOCK_GATE_DISABLE |
5182 CL_UNIT_CLOCK_GATE_DISABLE);
5183 I915_WRITE(RAMCLK_GATE_D, 0);
5184 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5185 OVRUNIT_CLOCK_GATE_DISABLE |
5186 OVCUNIT_CLOCK_GATE_DISABLE;
5187 if (IS_GM45(dev))
5188 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5189 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
Daniel Vetter4358a372012-10-18 11:49:51 +02005190
5191 /* WaDisableRenderCachePipelinedFlush */
5192 I915_WRITE(CACHE_MODE_0,
5193 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
Ville Syrjäläde1aa622013-06-07 10:47:01 +03005194
Ville Syrjälä0e088b82013-06-07 10:47:04 +03005195 g4x_disable_trickle_feed(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005196}
5197
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005198static void crestline_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005199{
5200 struct drm_i915_private *dev_priv = dev->dev_private;
5201
5202 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5203 I915_WRITE(RENCLK_GATE_D2, 0);
5204 I915_WRITE(DSPCLK_GATE_D, 0);
5205 I915_WRITE(RAMCLK_GATE_D, 0);
5206 I915_WRITE16(DEUC, 0);
Ville Syrjälä20f94962013-06-07 10:47:02 +03005207 I915_WRITE(MI_ARB_STATE,
5208 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005209}
5210
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005211static void broadwater_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005212{
5213 struct drm_i915_private *dev_priv = dev->dev_private;
5214
5215 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5216 I965_RCC_CLOCK_GATE_DISABLE |
5217 I965_RCPB_CLOCK_GATE_DISABLE |
5218 I965_ISC_CLOCK_GATE_DISABLE |
5219 I965_FBC_CLOCK_GATE_DISABLE);
5220 I915_WRITE(RENCLK_GATE_D2, 0);
Ville Syrjälä20f94962013-06-07 10:47:02 +03005221 I915_WRITE(MI_ARB_STATE,
5222 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005223}
5224
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005225static void gen3_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005226{
5227 struct drm_i915_private *dev_priv = dev->dev_private;
5228 u32 dstate = I915_READ(D_STATE);
5229
5230 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5231 DSTATE_DOT_CLOCK_GATING;
5232 I915_WRITE(D_STATE, dstate);
Chris Wilson13a86b82012-04-24 14:51:43 +01005233
5234 if (IS_PINEVIEW(dev))
5235 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
Daniel Vetter974a3b02012-09-09 11:54:16 +02005236
5237 /* IIR "flip pending" means done if this bit is set */
5238 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005239}
5240
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005241static void i85x_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005242{
5243 struct drm_i915_private *dev_priv = dev->dev_private;
5244
5245 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5246}
5247
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005248static void i830_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005249{
5250 struct drm_i915_private *dev_priv = dev->dev_private;
5251
5252 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5253}
5254
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005255void intel_init_clock_gating(struct drm_device *dev)
5256{
5257 struct drm_i915_private *dev_priv = dev->dev_private;
5258
5259 dev_priv->display.init_clock_gating(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03005260}
5261
Imre Deak7d708ee2013-04-17 14:04:50 +03005262void intel_suspend_hw(struct drm_device *dev)
5263{
5264 if (HAS_PCH_LPT(dev))
5265 lpt_suspend_hw(dev);
5266}
5267
Paulo Zanoni15d199e2013-03-22 14:14:13 -03005268/**
5269 * We should only use the power well if we explicitly asked the hardware to
5270 * enable it, so check if it's enabled and also check if we've requested it to
5271 * be enabled.
5272 */
Paulo Zanonib97186f2013-05-03 12:15:36 -03005273bool intel_display_power_enabled(struct drm_device *dev,
5274 enum intel_display_power_domain domain)
Paulo Zanoni15d199e2013-03-22 14:14:13 -03005275{
5276 struct drm_i915_private *dev_priv = dev->dev_private;
5277
Paulo Zanonib97186f2013-05-03 12:15:36 -03005278 if (!HAS_POWER_WELL(dev))
5279 return true;
5280
5281 switch (domain) {
5282 case POWER_DOMAIN_PIPE_A:
5283 case POWER_DOMAIN_TRANSCODER_EDP:
5284 return true;
5285 case POWER_DOMAIN_PIPE_B:
5286 case POWER_DOMAIN_PIPE_C:
5287 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5288 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5289 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5290 case POWER_DOMAIN_TRANSCODER_A:
5291 case POWER_DOMAIN_TRANSCODER_B:
5292 case POWER_DOMAIN_TRANSCODER_C:
Paulo Zanoni15d199e2013-03-22 14:14:13 -03005293 return I915_READ(HSW_PWR_WELL_DRIVER) ==
Paulo Zanoni6aedd1f2013-08-02 16:22:25 -03005294 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
Paulo Zanonib97186f2013-05-03 12:15:36 -03005295 default:
5296 BUG();
5297 }
Paulo Zanoni15d199e2013-03-22 14:14:13 -03005298}
5299
Wang Xingchaoa38911a2013-05-30 22:07:11 +08005300static void __intel_set_power_well(struct drm_device *dev, bool enable)
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005301{
5302 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonifa42e232013-01-25 16:59:11 -02005303 bool is_enabled, enable_requested;
5304 uint32_t tmp;
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005305
Paulo Zanonifa42e232013-01-25 16:59:11 -02005306 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
Paulo Zanoni6aedd1f2013-08-02 16:22:25 -03005307 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5308 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005309
Paulo Zanonifa42e232013-01-25 16:59:11 -02005310 if (enable) {
5311 if (!enable_requested)
Paulo Zanoni6aedd1f2013-08-02 16:22:25 -03005312 I915_WRITE(HSW_PWR_WELL_DRIVER,
5313 HSW_PWR_WELL_ENABLE_REQUEST);
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005314
Paulo Zanonifa42e232013-01-25 16:59:11 -02005315 if (!is_enabled) {
5316 DRM_DEBUG_KMS("Enabling power well\n");
5317 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
Paulo Zanoni6aedd1f2013-08-02 16:22:25 -03005318 HSW_PWR_WELL_STATE_ENABLED), 20))
Paulo Zanonifa42e232013-01-25 16:59:11 -02005319 DRM_ERROR("Timeout enabling power well\n");
5320 }
5321 } else {
5322 if (enable_requested) {
Paulo Zanoni9dbd8fe2013-07-23 10:48:11 -03005323 unsigned long irqflags;
5324 enum pipe p;
5325
Paulo Zanonifa42e232013-01-25 16:59:11 -02005326 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
Paulo Zanoni9dbd8fe2013-07-23 10:48:11 -03005327 POSTING_READ(HSW_PWR_WELL_DRIVER);
Paulo Zanonifa42e232013-01-25 16:59:11 -02005328 DRM_DEBUG_KMS("Requesting to disable the power well\n");
Paulo Zanoni9dbd8fe2013-07-23 10:48:11 -03005329
5330 /*
5331 * After this, the registers on the pipes that are part
5332 * of the power well will become zero, so we have to
5333 * adjust our counters according to that.
5334 *
5335 * FIXME: Should we do this in general in
5336 * drm_vblank_post_modeset?
5337 */
5338 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5339 for_each_pipe(p)
5340 if (p != PIPE_A)
5341 dev->last_vblank[p] = 0;
5342 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005343 }
5344 }
Paulo Zanonifa42e232013-01-25 16:59:11 -02005345}
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005346
Wang Xingchaoa38911a2013-05-30 22:07:11 +08005347static struct i915_power_well *hsw_pwr;
5348
5349/* Display audio driver power well request */
5350void i915_request_power_well(void)
5351{
5352 if (WARN_ON(!hsw_pwr))
5353 return;
5354
5355 spin_lock_irq(&hsw_pwr->lock);
5356 if (!hsw_pwr->count++ &&
5357 !hsw_pwr->i915_request)
5358 __intel_set_power_well(hsw_pwr->device, true);
5359 spin_unlock_irq(&hsw_pwr->lock);
5360}
5361EXPORT_SYMBOL_GPL(i915_request_power_well);
5362
5363/* Display audio driver power well release */
5364void i915_release_power_well(void)
5365{
5366 if (WARN_ON(!hsw_pwr))
5367 return;
5368
5369 spin_lock_irq(&hsw_pwr->lock);
5370 WARN_ON(!hsw_pwr->count);
5371 if (!--hsw_pwr->count &&
5372 !hsw_pwr->i915_request)
5373 __intel_set_power_well(hsw_pwr->device, false);
5374 spin_unlock_irq(&hsw_pwr->lock);
5375}
5376EXPORT_SYMBOL_GPL(i915_release_power_well);
5377
5378int i915_init_power_well(struct drm_device *dev)
5379{
5380 struct drm_i915_private *dev_priv = dev->dev_private;
5381
5382 hsw_pwr = &dev_priv->power_well;
5383
5384 hsw_pwr->device = dev;
5385 spin_lock_init(&hsw_pwr->lock);
5386 hsw_pwr->count = 0;
5387
5388 return 0;
5389}
5390
5391void i915_remove_power_well(struct drm_device *dev)
5392{
5393 hsw_pwr = NULL;
5394}
5395
5396void intel_set_power_well(struct drm_device *dev, bool enable)
5397{
5398 struct drm_i915_private *dev_priv = dev->dev_private;
5399 struct i915_power_well *power_well = &dev_priv->power_well;
5400
5401 if (!HAS_POWER_WELL(dev))
5402 return;
5403
5404 if (!i915_disable_power_well && !enable)
5405 return;
5406
5407 spin_lock_irq(&power_well->lock);
5408 power_well->i915_request = enable;
5409
5410 /* only reject "disable" power well request */
5411 if (power_well->count && !enable) {
5412 spin_unlock_irq(&power_well->lock);
5413 return;
5414 }
5415
5416 __intel_set_power_well(dev, enable);
5417 spin_unlock_irq(&power_well->lock);
5418}
5419
Paulo Zanonifa42e232013-01-25 16:59:11 -02005420/*
5421 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5422 * when not needed anymore. We have 4 registers that can request the power well
5423 * to be enabled, and it will only be disabled if none of the registers is
5424 * requesting it to be enabled.
5425 */
5426void intel_init_power_well(struct drm_device *dev)
5427{
5428 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005429
Paulo Zanoni86d52df2013-03-06 20:03:18 -03005430 if (!HAS_POWER_WELL(dev))
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005431 return;
5432
Paulo Zanonifa42e232013-01-25 16:59:11 -02005433 /* For now, we need the power well to be always enabled. */
5434 intel_set_power_well(dev, true);
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005435
Paulo Zanonifa42e232013-01-25 16:59:11 -02005436 /* We're taking over the BIOS, so clear any requests made by it since
5437 * the driver is in charge now. */
Paulo Zanoni6aedd1f2013-08-02 16:22:25 -03005438 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
Paulo Zanonifa42e232013-01-25 16:59:11 -02005439 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
Eugeni Dodonovd0d3e512012-05-09 15:37:16 -03005440}
5441
Paulo Zanonic67a4702013-08-19 13:18:09 -03005442/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
5443void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5444{
5445 hsw_disable_package_c8(dev_priv);
5446}
5447
5448void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5449{
5450 hsw_enable_package_c8(dev_priv);
5451}
5452
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005453/* Set up chip specific power management-related functions */
5454void intel_init_pm(struct drm_device *dev)
5455{
5456 struct drm_i915_private *dev_priv = dev->dev_private;
5457
5458 if (I915_HAS_FBC(dev)) {
5459 if (HAS_PCH_SPLIT(dev)) {
5460 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
Rodrigo Vivi891348b2013-05-06 19:37:36 -03005461 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
Rodrigo Viviabe959c2013-05-06 19:37:33 -03005462 dev_priv->display.enable_fbc =
5463 gen7_enable_fbc;
5464 else
5465 dev_priv->display.enable_fbc =
5466 ironlake_enable_fbc;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005467 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5468 } else if (IS_GM45(dev)) {
5469 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5470 dev_priv->display.enable_fbc = g4x_enable_fbc;
5471 dev_priv->display.disable_fbc = g4x_disable_fbc;
5472 } else if (IS_CRESTLINE(dev)) {
5473 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5474 dev_priv->display.enable_fbc = i8xx_enable_fbc;
5475 dev_priv->display.disable_fbc = i8xx_disable_fbc;
5476 }
5477 /* 855GM needs testing */
5478 }
5479
Daniel Vetterc921aba2012-04-26 23:28:17 +02005480 /* For cxsr */
5481 if (IS_PINEVIEW(dev))
5482 i915_pineview_get_mem_freq(dev);
5483 else if (IS_GEN5(dev))
5484 i915_ironlake_get_mem_freq(dev);
5485
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005486 /* For FIFO watermark updates */
5487 if (HAS_PCH_SPLIT(dev)) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03005488 intel_setup_wm_latency(dev);
5489
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005490 if (IS_GEN5(dev)) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03005491 if (dev_priv->wm.pri_latency[1] &&
5492 dev_priv->wm.spr_latency[1] &&
5493 dev_priv->wm.cur_latency[1])
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005494 dev_priv->display.update_wm = ironlake_update_wm;
5495 else {
5496 DRM_DEBUG_KMS("Failed to get proper latency. "
5497 "Disable CxSR\n");
5498 dev_priv->display.update_wm = NULL;
5499 }
5500 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5501 } else if (IS_GEN6(dev)) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03005502 if (dev_priv->wm.pri_latency[0] &&
5503 dev_priv->wm.spr_latency[0] &&
5504 dev_priv->wm.cur_latency[0]) {
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005505 dev_priv->display.update_wm = sandybridge_update_wm;
5506 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5507 } else {
5508 DRM_DEBUG_KMS("Failed to read display plane latency. "
5509 "Disable CxSR\n");
5510 dev_priv->display.update_wm = NULL;
5511 }
5512 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5513 } else if (IS_IVYBRIDGE(dev)) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03005514 if (dev_priv->wm.pri_latency[0] &&
5515 dev_priv->wm.spr_latency[0] &&
5516 dev_priv->wm.cur_latency[0]) {
Chris Wilsonc43d0182012-12-11 12:01:42 +00005517 dev_priv->display.update_wm = ivybridge_update_wm;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005518 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5519 } else {
5520 DRM_DEBUG_KMS("Failed to read display plane latency. "
5521 "Disable CxSR\n");
5522 dev_priv->display.update_wm = NULL;
5523 }
5524 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
Eugeni Dodonov6b8a5ee2012-05-09 15:37:23 -03005525 } else if (IS_HASWELL(dev)) {
Ville Syrjälä53615a52013-08-01 16:18:50 +03005526 if (dev_priv->wm.pri_latency[0] &&
5527 dev_priv->wm.spr_latency[0] &&
5528 dev_priv->wm.cur_latency[0]) {
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03005529 dev_priv->display.update_wm = haswell_update_wm;
Paulo Zanoni526682e2013-05-24 11:59:18 -03005530 dev_priv->display.update_sprite_wm =
5531 haswell_update_sprite_wm;
Eugeni Dodonov6b8a5ee2012-05-09 15:37:23 -03005532 } else {
5533 DRM_DEBUG_KMS("Failed to read display plane latency. "
5534 "Disable CxSR\n");
5535 dev_priv->display.update_wm = NULL;
5536 }
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03005537 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005538 } else
5539 dev_priv->display.update_wm = NULL;
5540 } else if (IS_VALLEYVIEW(dev)) {
5541 dev_priv->display.update_wm = valleyview_update_wm;
5542 dev_priv->display.init_clock_gating =
5543 valleyview_init_clock_gating;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03005544 } else if (IS_PINEVIEW(dev)) {
5545 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5546 dev_priv->is_ddr3,
5547 dev_priv->fsb_freq,
5548 dev_priv->mem_freq)) {
5549 DRM_INFO("failed to find known CxSR latency "
5550 "(found ddr%s fsb freq %d, mem freq %d), "
5551 "disabling CxSR\n",
5552 (dev_priv->is_ddr3 == 1) ? "3" : "2",
5553 dev_priv->fsb_freq, dev_priv->mem_freq);
5554 /* Disable CxSR and never update its watermark again */
5555 pineview_disable_cxsr(dev);
5556 dev_priv->display.update_wm = NULL;
5557 } else
5558 dev_priv->display.update_wm = pineview_update_wm;
5559 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5560 } else if (IS_G4X(dev)) {
5561 dev_priv->display.update_wm = g4x_update_wm;
5562 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
5563 } else if (IS_GEN4(dev)) {
5564 dev_priv->display.update_wm = i965_update_wm;
5565 if (IS_CRESTLINE(dev))
5566 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
5567 else if (IS_BROADWATER(dev))
5568 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
5569 } else if (IS_GEN3(dev)) {
5570 dev_priv->display.update_wm = i9xx_update_wm;
5571 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5572 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5573 } else if (IS_I865G(dev)) {
5574 dev_priv->display.update_wm = i830_update_wm;
5575 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5576 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5577 } else if (IS_I85X(dev)) {
5578 dev_priv->display.update_wm = i9xx_update_wm;
5579 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
5580 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5581 } else {
5582 dev_priv->display.update_wm = i830_update_wm;
5583 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5584 if (IS_845G(dev))
5585 dev_priv->display.get_fifo_size = i845_get_fifo_size;
5586 else
5587 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5588 }
5589}
5590
Ben Widawsky42c05262012-09-26 10:34:00 -07005591int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5592{
Jesse Barnes4fc688c2012-11-02 11:14:01 -07005593 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Ben Widawsky42c05262012-09-26 10:34:00 -07005594
5595 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5596 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
5597 return -EAGAIN;
5598 }
5599
5600 I915_WRITE(GEN6_PCODE_DATA, *val);
5601 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5602
5603 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5604 500)) {
5605 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
5606 return -ETIMEDOUT;
5607 }
5608
5609 *val = I915_READ(GEN6_PCODE_DATA);
5610 I915_WRITE(GEN6_PCODE_DATA, 0);
5611
5612 return 0;
5613}
5614
5615int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
5616{
Jesse Barnes4fc688c2012-11-02 11:14:01 -07005617 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Ben Widawsky42c05262012-09-26 10:34:00 -07005618
5619 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5620 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
5621 return -EAGAIN;
5622 }
5623
5624 I915_WRITE(GEN6_PCODE_DATA, val);
5625 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5626
5627 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5628 500)) {
5629 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
5630 return -ETIMEDOUT;
5631 }
5632
5633 I915_WRITE(GEN6_PCODE_DATA, 0);
5634
5635 return 0;
5636}
Jesse Barnesa0e4e192013-04-02 11:23:05 -07005637
Jesse Barnes855ba3b2013-04-17 15:54:57 -07005638int vlv_gpu_freq(int ddr_freq, int val)
5639{
5640 int mult, base;
5641
5642 switch (ddr_freq) {
5643 case 800:
5644 mult = 20;
5645 base = 120;
5646 break;
5647 case 1066:
5648 mult = 22;
5649 base = 133;
5650 break;
5651 case 1333:
5652 mult = 21;
5653 base = 125;
5654 break;
5655 default:
5656 return -1;
5657 }
5658
5659 return ((val - 0xbd) * mult) + base;
5660}
5661
5662int vlv_freq_opcode(int ddr_freq, int val)
5663{
5664 int mult, base;
5665
5666 switch (ddr_freq) {
5667 case 800:
5668 mult = 20;
5669 base = 120;
5670 break;
5671 case 1066:
5672 mult = 22;
5673 base = 133;
5674 break;
5675 case 1333:
5676 mult = 21;
5677 base = 125;
5678 break;
5679 default:
5680 return -1;
5681 }
5682
5683 val /= mult;
5684 val -= base / mult;
5685 val += 0xbd;
5686
5687 if (val > 0xea)
5688 val = 0xea;
5689
5690 return val;
5691}
5692
Chris Wilson907b28c2013-07-19 20:36:52 +01005693void intel_pm_init(struct drm_device *dev)
5694{
5695 struct drm_i915_private *dev_priv = dev->dev_private;
5696
5697 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5698 intel_gen6_powersave_work);
5699}
5700