/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

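/*
 * For example, a platform where only RC6 and deep RC6 are considered safe
 * would be described by the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE),
 * leaving RC6pp disabled; a mask of 0 disables RC6 entirely.
 */
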
static void gen9_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* WaEnableLbsSlaRetryTimerDecrement:skl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

        /* WaDisableKillLogic:bxt,skl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
}

static void skl_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        if (INTEL_REVID(dev) <= SKL_REVID_B0) {
                /*
                 * WaDisableSDEUnitClockGating:skl
                 * WaSetGAPSunitClckGateDisable:skl
                 */
                I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                           GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
                           GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

                /* WaDisableVFUnitClockGating:skl */
                I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
                           GEN6_VFUNIT_CLOCK_GATE_DISABLE);
        }

        if (INTEL_REVID(dev) <= SKL_REVID_D0) {
                /* WaDisableHDCInvalidation:skl */
                I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                           BDW_DISABLE_HDC_INVALIDATION);

                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
        }

        if (INTEL_REVID(dev) <= SKL_REVID_E0)
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        gen9_init_clock_gating(dev);

        /*
         * FIXME:
         * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
         * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
         */
        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
                   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

        /* FIXME: apply on A0 only */
        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->ips.r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->ips.c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->ips.c_m = 1;
        } else {
                dev_priv->ips.c_m = 2;
        }
}

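/*
 * Each entry follows the struct cxsr_latency field order used by
 * intel_get_cxsr_latency() below: { is_desktop, is_ddr3, fsb_freq,
 * mem_freq, display_sr, cursor_sr, display_hpll_disable,
 * cursor_hpll_disable }.
 */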
static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
                                                         int is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        mutex_lock(&dev_priv->rps.hw_lock);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        if (enable)
                val &= ~FORCE_DDR_HIGH_FREQ;
        else
                val |= FORCE_DDR_HIGH_FREQ;
        val &= ~FORCE_DDR_LOW_FREQ;
        val |= FORCE_DDR_FREQ_REQ_ACK;
        vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
                DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

        mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
        u32 val;

        mutex_lock(&dev_priv->rps.hw_lock);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        if (enable)
                val |= DSP_MAXFIFO_PM5_ENABLE;
        else
                val &= ~DSP_MAXFIFO_PM5_ENABLE;
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

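/*
 * FW_WM() packs a watermark value into its DSPFW register field: the value
 * is shifted into place with the field's _SHIFT constant and any bits that
 * do not fit are masked off with its _MASK constant.
 */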
#define FW_WM(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
        struct drm_device *dev = dev_priv->dev;
        u32 val;

        if (IS_VALLEYVIEW(dev)) {
                I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
                POSTING_READ(FW_BLC_SELF_VLV);
                dev_priv->wm.vlv.cxsr = enable;
        } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
                I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_PINEVIEW(dev)) {
                val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
                val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
                I915_WRITE(DSPFW3, val);
                POSTING_READ(DSPFW3);
        } else if (IS_I945G(dev) || IS_I945GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
                               _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
                I915_WRITE(FW_BLC_SELF, val);
                POSTING_READ(FW_BLC_SELF);
        } else if (IS_I915GM(dev)) {
                val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
                               _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
                I915_WRITE(INSTPM, val);
                POSTING_READ(INSTPM);
        } else {
                return;
        }

        DRM_DEBUG_KMS("memory self-refresh is %s\n",
                      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

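/*
 * VLV/CHV FIFO start offsets are 9-bit values split across two registers:
 * the low 8 bits sit in DSPARB (at lo_shift) and the 9th bit sits in
 * DSPARB2 (at hi_shift). VLV_FIFO_START() below reassembles the full
 * 9-bit start offset.
 */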
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
        ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static int vlv_get_fifo_size(struct drm_device *dev,
                             enum pipe pipe, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int sprite0_start, sprite1_start, size;

        switch (pipe) {
                uint32_t dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);
                sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
                sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
                break;
        case PIPE_C:
                dsparb2 = I915_READ(DSPARB2);
                dsparb3 = I915_READ(DSPARB3);
                sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
                sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
                break;
        default:
                return 0;
        }

        switch (plane) {
        case 0:
                size = sprite0_start;
                break;
        case 1:
                size = sprite1_start - sprite0_start;
                break;
        case 2:
                size = 512 - 1 - sprite1_start;
                break;
        default:
                return 0;
        }

        DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
                      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
                      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
                      size);

        return size;
}

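/*
 * On the older platforms handled below, DSPARB holds the split points of
 * the shared display FIFO: plane A's size is the first split point itself,
 * and plane B gets the span between the two split points.
 */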
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (plane)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A",
                      size);

        return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        .fifo_size = PINEVIEW_DISPLAY_FIFO,
        .max_wm = PINEVIEW_MAX_WM,
        .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
        .guard_size = PINEVIEW_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        .fifo_size = PINEVIEW_CURSOR_FIFO,
        .max_wm = PINEVIEW_CURSOR_MAX_WM,
        .default_wm = PINEVIEW_CURSOR_DFT_WM,
        .guard_size = PINEVIEW_CURSOR_GUARD_WM,
        .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
        .fifo_size = G4X_FIFO_SIZE,
        .max_wm = G4X_MAX_WM,
        .default_wm = G4X_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
        .fifo_size = VALLEYVIEW_FIFO_SIZE,
        .max_wm = VALLEYVIEW_MAX_WM,
        .default_wm = VALLEYVIEW_MAX_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        .fifo_size = I965_CURSOR_FIFO,
        .max_wm = I965_CURSOR_MAX_WM,
        .default_wm = I965_CURSOR_DFT_WM,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        .fifo_size = I945_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
        .fifo_size = I915_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
        .fifo_size = I855GM_FIFO_SIZE,
        .max_wm = I915_MAX_WM/2,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
        .fifo_size = I830_FIFO_SIZE,
        .max_wm = I915_MAX_WM,
        .default_wm = 1,
        .guard_size = 2,
        .cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO for this plane, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
                                        int fifo_size,
                                        int pixel_size,
                                        unsigned long latency_ns)
{
        long entries_required, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

        DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

        wm_size = fifo_size - (entries_required + wm->guard_size);

        DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;

        /*
         * Bspec seems to indicate that the value shouldn't be lower than
         * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
         * Let's go for 8, which is the burst size, since certain platforms
         * already use a hardcoded 8 (which is what the spec says should be
         * done).
         */
        if (wm_size <= 8)
                wm_size = 8;

        return wm_size;
}
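
/*
 * Worked example for intel_calculate_wm() (illustrative, numbers made up):
 * with a 100000 kHz pixel clock, 4 bytes per pixel, 5000 ns of latency and
 * a 64-byte cacheline, (100000/1000) * 4 * 5000 / 1000 = 2000 bytes can
 * drain while a fetch is outstanding, i.e. DIV_ROUND_UP(2000, 64) = 32
 * FIFO entries. With a 96-entry FIFO and a guard size of 2, the watermark
 * would be 96 - (32 + 2) = 62 entries.
 */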

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
        struct drm_crtc *crtc, *enabled = NULL;

        for_each_crtc(dev, crtc) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
        struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                intel_set_memory_cxsr(dev_priv, false);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode;
                int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
                int clock;

                adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
                clock = adjusted_mode->crtc_clock;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= FW_WM(wm, SR);
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= FW_WM(wm, CURSOR_SR);
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= FW_WM(wm, HPLL_SR);
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= FW_WM(wm, HPLL_CURSOR);
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                intel_set_memory_cxsr(dev_priv, true);
        } else {
                intel_set_memory_cxsr(dev_priv, false);
        }
}

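/*
 * g4x_compute_wm0() below computes the per-pipe "level 0" watermarks: the
 * plane watermark uses the small buffer method (bytes drained during the
 * latency window), the cursor uses the line-based large buffer method, and
 * both add a TLB miss allowance plus the guard size before clamping to the
 * register maximum.
 */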
static bool g4x_compute_wm0(struct drm_device *dev,
                            int plane,
                            const struct intel_watermark_params *display,
                            int display_latency_ns,
                            const struct intel_watermark_params *cursor,
                            int cursor_latency_ns,
                            int *plane_wm,
                            int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
        }

        adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
        pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

        /* Use the small buffer method to calculate plane watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, display->cacheline_size);
        *plane_wm = entries + display->guard_size;
        if (*plane_wm > (int)display->max_wm)
                *plane_wm = display->max_wm;

        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
        tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
        if (*cursor_wm > (int)cursor->max_wm)
                *cursor_wm = (int)cursor->max_wm;

        return true;
}

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
                             int plane,
                             int latency_ns,
                             const struct intel_watermark_params *display,
                             const struct intel_watermark_params *cursor,
                             int *display_wm, int *cursor_wm)
{
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
        int entries;

        if (!latency_ns) {
                *display_wm = *cursor_wm = 0;
                return false;
        }

        crtc = intel_get_crtc_for_plane(dev, plane);
        adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
        pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;

        /* Use the minimum of the small and large buffer method for primary */
        small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        large = line_count * line_size;

        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;

        /* calculate the self-refresh watermark for display cursor */
        entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;

        return g4x_check_srwm(dev,
                              *display_wm, *cursor_wm,
                              display, cursor);
}

#define FW_WM_VLV(value, plane) \
        (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
                                const struct vlv_wm_values *wm)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(VLV_DDL(pipe),
                   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
                   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
                   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
                   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

        I915_WRITE(DSPFW1,
                   FW_WM(wm->sr.plane, SR) |
                   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
                   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
                   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
        I915_WRITE(DSPFW2,
                   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
                   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
                   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
        I915_WRITE(DSPFW3,
                   FW_WM(wm->sr.cursor, CURSOR_SR));

        if (IS_CHERRYVIEW(dev_priv)) {
                I915_WRITE(DSPFW7_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
                I915_WRITE(DSPFW8_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
                           FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
                I915_WRITE(DSPFW9_CHV,
                           FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
                           FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
                           FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
                           FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
        } else {
                I915_WRITE(DSPFW7,
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
                           FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
                I915_WRITE(DSPHOWM,
                           FW_WM(wm->sr.plane >> 9, SR_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
                           FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
                           FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
                           FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
                           FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
        }

        /* zero (unused) WM1 watermarks */
        I915_WRITE(DSPFW4, 0);
        I915_WRITE(DSPFW5, 0);
        I915_WRITE(DSPFW6, 0);
        I915_WRITE(DSPHOWM1, 0);

        POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
        VLV_WM_LEVEL_PM2,
        VLV_WM_LEVEL_PM5,
        VLV_WM_LEVEL_DDR_DVFS,
        CHV_WM_NUM_LEVELS,
        VLV_WM_NUM_LEVELS = 1,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
                                   unsigned int pipe_htotal,
                                   unsigned int horiz_pixels,
                                   unsigned int bytes_per_pixel,
                                   unsigned int latency)
{
        unsigned int ret;

        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
        ret = DIV_ROUND_UP(ret, 64);

        return ret;
}

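/*
 * Worked example for vlv_wm_method2() (illustrative, numbers made up):
 * with a 200000 kHz pixel rate, an htotal of 2200, 1920 visible pixels,
 * 4 bytes per pixel and a 30 us latency (300 in 0.1us units), the latency
 * covers (300 * 200000) / (2200 * 10000) = 2 full lines; method 2 then
 * charges (2 + 1) * 1920 * 4 = 23040 bytes, i.e.
 * DIV_ROUND_UP(23040, 64) = 360 64-byte FIFO blocks.
 */
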
static void vlv_setup_wm_latency(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
                dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
        }
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
                                     struct intel_crtc *crtc,
                                     const struct intel_plane_state *state,
                                     int level)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        int clock, htotal, pixel_size, width, wm;

        if (dev_priv->wm.pri_latency[level] == 0)
                return USHRT_MAX;

        if (!state->visible)
                return 0;

        pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
        clock = crtc->config->base.adjusted_mode.crtc_clock;
        htotal = crtc->config->base.adjusted_mode.crtc_htotal;
        width = crtc->config->pipe_src_w;
        if (WARN_ON(htotal == 0))
                htotal = 1;

        if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                /*
                 * FIXME the formula gives values that are
                 * too big for the cursor FIFO, and hence we
                 * would never be able to use cursors. For
                 * now just hardcode the watermark.
                 */
                wm = 63;
        } else {
                wm = vlv_wm_method2(clock, htotal, width, pixel_size,
                                    dev_priv->wm.pri_latency[level] * 10);
        }

        return min_t(int, wm, USHRT_MAX);
}

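/*
 * vlv_compute_fifo() splits the 511-entry display FIFO between the pipe's
 * planes in proportion to each visible plane's bytes per pixel: the cursor
 * keeps a fixed 63 entries, invisible planes get none, and any remainder
 * is spread evenly across the planes that received an allocation.
 */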
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        struct intel_plane *plane;
        unsigned int total_rate = 0;
        const int fifo_size = 512 - 1;
        int fifo_extra, fifo_left = fifo_size;

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                if (state->visible) {
                        wm_state->num_active_planes++;
                        total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
                }
        }

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);
                unsigned int rate;

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                        plane->wm.fifo_size = 63;
                        continue;
                }

                if (!state->visible) {
                        plane->wm.fifo_size = 0;
                        continue;
                }

                rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
                plane->wm.fifo_size = fifo_size * rate / total_rate;
                fifo_left -= plane->wm.fifo_size;
        }

        fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

        /* spread the remainder evenly */
        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                int plane_extra;

                if (fifo_left == 0)
                        break;

                if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                /* give it all to the first plane if none are active */
                if (plane->wm.fifo_size == 0 &&
                    wm_state->num_active_planes)
                        continue;

                plane_extra = min(fifo_extra, fifo_left);
                plane->wm.fifo_size += plane_extra;
                fifo_left -= plane_extra;
        }

        WARN_ON(fifo_left != 0);
}

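/*
 * vlv_compute_wm_level() returns the number of FIFO entries a plane needs,
 * but the DSPFW registers are programmed with values relative to the FIFO
 * size, so every level is inverted here against the plane's FIFO
 * allocation (and the maxfifo values against the total FIFO).
 */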
static void vlv_invert_wms(struct intel_crtc *crtc)
{
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        int level;

        for (level = 0; level < wm_state->num_levels; level++) {
                struct drm_device *dev = crtc->base.dev;
                const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
                struct intel_plane *plane;

                wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
                wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        switch (plane->base.type) {
                                int sprite;
                        case DRM_PLANE_TYPE_CURSOR:
                                wm_state->wm[level].cursor = plane->wm.fifo_size -
                                        wm_state->wm[level].cursor;
                                break;
                        case DRM_PLANE_TYPE_PRIMARY:
                                wm_state->wm[level].primary = plane->wm.fifo_size -
                                        wm_state->wm[level].primary;
                                break;
                        case DRM_PLANE_TYPE_OVERLAY:
                                sprite = plane->plane;
                                wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
                                        wm_state->wm[level].sprite[sprite];
                                break;
                        }
                }
        }
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct vlv_wm_state *wm_state = &crtc->wm_state;
        struct intel_plane *plane;
        int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
        int level;

        memset(wm_state, 0, sizeof(*wm_state));

        wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
        if (IS_CHERRYVIEW(dev))
                wm_state->num_levels = CHV_WM_NUM_LEVELS;
        else
                wm_state->num_levels = VLV_WM_NUM_LEVELS;

        wm_state->num_active_planes = 0;

        vlv_compute_fifo(crtc);

        if (wm_state->num_active_planes != 1)
                wm_state->cxsr = false;

        if (wm_state->cxsr) {
                for (level = 0; level < wm_state->num_levels; level++) {
                        wm_state->sr[level].plane = sr_fifo_size;
                        wm_state->sr[level].cursor = 63;
                }
        }

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                struct intel_plane_state *state =
                        to_intel_plane_state(plane->base.state);

                if (!state->visible)
                        continue;

                /* normal watermarks */
                for (level = 0; level < wm_state->num_levels; level++) {
                        int wm = vlv_compute_wm_level(plane, crtc, state, level);
                        int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

                        /* hack */
                        if (WARN_ON(level == 0 && wm > max_wm))
                                wm = max_wm;

                        if (wm > plane->wm.fifo_size)
                                break;

                        switch (plane->base.type) {
                                int sprite;
                        case DRM_PLANE_TYPE_CURSOR:
                                wm_state->wm[level].cursor = wm;
                                break;
                        case DRM_PLANE_TYPE_PRIMARY:
                                wm_state->wm[level].primary = wm;
                                break;
                        case DRM_PLANE_TYPE_OVERLAY:
                                sprite = plane->plane;
                                wm_state->wm[level].sprite[sprite] = wm;
                                break;
                        }
                }

                wm_state->num_levels = level;

                if (!wm_state->cxsr)
                        continue;

                /* maxfifo watermarks */
                switch (plane->base.type) {
                        int sprite, level;
                case DRM_PLANE_TYPE_CURSOR:
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].cursor =
                                        wm_state->sr[level].cursor;
                        break;
                case DRM_PLANE_TYPE_PRIMARY:
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].plane =
                                        min(wm_state->sr[level].plane,
                                            wm_state->wm[level].primary);
                        break;
                case DRM_PLANE_TYPE_OVERLAY:
                        sprite = plane->plane;
                        for (level = 0; level < wm_state->num_levels; level++)
                                wm_state->sr[level].plane =
                                        min(wm_state->sr[level].plane,
                                            wm_state->wm[level].sprite[sprite]);
                        break;
                }
        }

        /* clear any (partially) filled invalid levels */
        for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
                memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
                memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
        }

        vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
        (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane;
        int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

        for_each_intel_plane_on_crtc(dev, crtc, plane) {
                if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
                        WARN_ON(plane->wm.fifo_size != 63);
                        continue;
                }

                if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
                        sprite0_start = plane->wm.fifo_size;
                else if (plane->plane == 0)
                        sprite1_start = sprite0_start + plane->wm.fifo_size;
                else
                        fifo_size = sprite1_start + plane->wm.fifo_size;
        }

        WARN_ON(fifo_size != 512 - 1);

        DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
                      pipe_name(crtc->pipe), sprite0_start,
                      sprite1_start, fifo_size);

        switch (crtc->pipe) {
                uint32_t dsparb, dsparb2, dsparb3;
        case PIPE_A:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);

                dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
                            VLV_FIFO(SPRITEB, 0xff));
                dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
                           VLV_FIFO(SPRITEB, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
                             VLV_FIFO(SPRITEB_HI, 0x1));
                dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB, dsparb);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        case PIPE_B:
                dsparb = I915_READ(DSPARB);
                dsparb2 = I915_READ(DSPARB2);

                dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
                            VLV_FIFO(SPRITED, 0xff));
                dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
                           VLV_FIFO(SPRITED, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
                             VLV_FIFO(SPRITED_HI, 0xff));
                dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB, dsparb);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        case PIPE_C:
                dsparb3 = I915_READ(DSPARB3);
                dsparb2 = I915_READ(DSPARB2);

                dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
                             VLV_FIFO(SPRITEF, 0xff));
                dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
                            VLV_FIFO(SPRITEF, sprite1_start));

                dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
                             VLV_FIFO(SPRITEF_HI, 0xff));
                dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
                            VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

                I915_WRITE(DSPARB3, dsparb3);
                I915_WRITE(DSPARB2, dsparb2);
                break;
        default:
                break;
        }
}

#undef VLV_FIFO

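/*
 * vlv_merge_wm() folds the per-pipe wm_states into one set of hardware
 * values, dropping down to the shallowest level supported by every active
 * pipe; cxsr and the deeper PM5/DDR DVFS levels survive only when exactly
 * one pipe is active.
 */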
static void vlv_merge_wm(struct drm_device *dev,
                         struct vlv_wm_values *wm)
{
        struct intel_crtc *crtc;
        int num_active_crtcs = 0;

        if (IS_CHERRYVIEW(dev))
                wm->level = VLV_WM_LEVEL_DDR_DVFS;
        else
                wm->level = VLV_WM_LEVEL_PM2;
        wm->cxsr = true;

        for_each_intel_crtc(dev, crtc) {
                const struct vlv_wm_state *wm_state = &crtc->wm_state;

                if (!crtc->active)
                        continue;

                if (!wm_state->cxsr)
                        wm->cxsr = false;

                num_active_crtcs++;
                wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
        }

        if (num_active_crtcs != 1)
                wm->cxsr = false;

        if (num_active_crtcs > 1)
                wm->level = VLV_WM_LEVEL_PM2;

        for_each_intel_crtc(dev, crtc) {
                struct vlv_wm_state *wm_state = &crtc->wm_state;
                enum pipe pipe = crtc->pipe;

                if (!crtc->active)
                        continue;

                wm->pipe[pipe] = wm_state->wm[wm->level];
                if (wm->cxsr)
                        wm->sr = wm_state->sr[wm->level];

                wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
                wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
        }
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct vlv_wm_values wm = {};

        vlv_compute_wm(intel_crtc);
        vlv_merge_wm(dev, &wm);

        if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
                /* FIXME should be part of crtc atomic commit */
                vlv_pipe_set_fifo_size(intel_crtc);
                return;
        }

        if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
            dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
                chv_set_memory_dvfs(dev_priv, false);

        if (wm.level < VLV_WM_LEVEL_PM5 &&
            dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
                chv_set_memory_pm5(dev_priv, false);

        if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, false);

        /* FIXME should be part of crtc atomic commit */
        vlv_pipe_set_fifo_size(intel_crtc);

        vlv_write_wm_values(intel_crtc, &wm);

        DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
                      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
                      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
                      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
                      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

        if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
                intel_set_memory_cxsr(dev_priv, true);

        if (wm.level >= VLV_WM_LEVEL_PM5 &&
            dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
                chv_set_memory_pm5(dev_priv, true);

        if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
            dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
                chv_set_memory_dvfs(dev_priv, true);

        dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
        bool cxsr_enabled;

        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, pessimal_latency_ns,
                            &g4x_cursor_wm_info, pessimal_latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
                cxsr_enabled = true;
        } else {
                cxsr_enabled = false;
                intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
                      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   FW_WM(plane_sr, SR) |
                   FW_WM(cursorb_wm, CURSORB) |
                   FW_WM(planeb_wm, PLANEB) |
                   FW_WM(planea_wm, PLANEA));
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   FW_WM(cursora_wm, CURSORA));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   FW_WM(cursor_sr, CURSOR_SR));

        if (cxsr_enabled)
                intel_set_memory_cxsr(dev_priv, true);
}

Ville Syrjälä46ba6142013-09-10 11:40:40 +03001474static void i965_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001475{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001476 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001477 struct drm_i915_private *dev_priv = dev->dev_private;
1478 struct drm_crtc *crtc;
1479 int srwm = 1;
1480 int cursor_sr = 16;
Imre Deak98584252014-06-13 14:54:20 +03001481 bool cxsr_enabled;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001482
1483 /* Calc sr entries for one plane configs */
1484 crtc = single_enabled_crtc(dev);
1485 if (crtc) {
1486 /* self-refresh has much higher latency */
1487 static const int sr_latency_ns = 12000;
Ville Syrjälä4fe85902013-09-04 18:25:22 +03001488 const struct drm_display_mode *adjusted_mode =
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001489 &to_intel_crtc(crtc)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001490 int clock = adjusted_mode->crtc_clock;
Jesse Barnesfec8cba2013-11-27 11:10:26 -08001491 int htotal = adjusted_mode->crtc_htotal;
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001492 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
Matt Roper59bea882015-02-27 10:12:01 -08001493 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001494 unsigned long line_time_us;
1495 int entries;
1496
Ville Syrjälä922044c2014-02-14 14:18:57 +02001497 line_time_us = max(htotal * 1000 / clock, 1);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001498
1499 /* Use ns/us then divide to preserve precision */
1500 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1501 pixel_size * hdisplay;
1502 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1503 srwm = I965_FIFO_SIZE - entries;
1504 if (srwm < 0)
1505 srwm = 1;
1506 srwm &= 0x1ff;
1507 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1508 entries, srwm);
1509
1510 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
Matt Roper3dd512f2015-02-27 10:12:00 -08001511 pixel_size * crtc->cursor->state->crtc_w;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001512 entries = DIV_ROUND_UP(entries,
1513 i965_cursor_wm_info.cacheline_size);
1514 cursor_sr = i965_cursor_wm_info.fifo_size -
1515 (entries + i965_cursor_wm_info.guard_size);
1516
1517 if (cursor_sr > i965_cursor_wm_info.max_wm)
1518 cursor_sr = i965_cursor_wm_info.max_wm;
1519
1520 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1521 "cursor %d\n", srwm, cursor_sr);
1522
Imre Deak98584252014-06-13 14:54:20 +03001523 cxsr_enabled = true;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001524 } else {
Imre Deak98584252014-06-13 14:54:20 +03001525 cxsr_enabled = false;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001526 /* Turn off self refresh if both pipes are enabled */
Imre Deak5209b1f2014-07-01 12:36:17 +03001527 intel_set_memory_cxsr(dev_priv, false);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001528 }
1529
1530 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1531 srwm);
1532
1533 /* 965 has limitations... */
Ville Syrjäläf4998962015-03-10 17:02:21 +02001534 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
1535 FW_WM(8, CURSORB) |
1536 FW_WM(8, PLANEB) |
1537 FW_WM(8, PLANEA));
1538 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
1539 FW_WM(8, PLANEC_OLD));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001540 /* update cursor SR watermark */
Ville Syrjäläf4998962015-03-10 17:02:21 +02001541 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
Imre Deak98584252014-06-13 14:54:20 +03001542
1543 if (cxsr_enabled)
1544 intel_set_memory_cxsr(dev_priv, true);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001545}
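
/*
 * Worked example for the self-refresh entry calculation above, with
 * made-up numbers, assuming I915_FIFO_LINE_SIZE = 64 and
 * I965_FIFO_SIZE = 512: htotal = 2200 at a 148500 kHz clock gives
 * line_time_us = 14. A 12000 ns latency then covers
 * ((12000 / 14) + 1000) / 1000 = 1 line of fetches; for a 1920-pixel
 * wide 32bpp plane that is 1 * 4 * 1920 = 7680 bytes = 120 FIFO
 * entries, so srwm = 512 - 120 = 392.
 */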

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
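
/*
 * Worked example for the panel fitter adjustment above (illustrative
 * numbers, not from the original source): with a 3840x2160 pipe being
 * downscaled to a 1920x1080 panel fitter window, the effective pixel rate
 * becomes pixel_rate * (3840 * 2160) / (1920 * 1080), i.e. 4x the mode's
 * crtc_clock, which is why downscaled pipes need larger watermarks.
 */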

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
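
/*
 * Worked example for method1 (made-up numbers): with pixel_rate =
 * 100000 kHz, 4 bytes per pixel and a 5us latency (latency = 50 in 0.1us
 * units), the FIFO must cover 100000 * 4 * 50 = 20000000, which divided
 * by 64 * 10000 (64-byte cachelines plus the unit conversion) rounds up
 * to 32, plus the 2-entry guard: a watermark of 34 cachelines.
 */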

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
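
/*
 * Worked example for method2 (made-up numbers): with the same 5us latency,
 * pixel_rate = 100000 kHz and pipe_htotal = 2200, the latency spans
 * (50 * 100000) / (2200 * 10000) = 0 complete lines, so one partial line
 * is fetched: (0 + 1) * 1920 pixels * 4 bytes = 7680 bytes, i.e. 120
 * cachelines, plus the 2-entry guard: 122. Method2 models line-granular
 * fetching, while method1 models a continuous stream.
 */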

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
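
/*
 * Sketch of what the FBC watermark expresses (an interpretation, not from
 * the original comments): pri_val is in 64-byte cachelines, so
 * pri_val * 64 / (horiz_pixels * bytes_per_pixel) converts it into whole
 * plane lines. E.g. a primary watermark of 120 cachelines on a 1920-pixel
 * wide 4-byte-per-pixel plane is 7680 bytes, exactly one line, giving an
 * FBC watermark of 1 + 2 = 3.
 */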

struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc->active)
		return 0;

	/*
	 * The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 dev_priv->cdclk_freq);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
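
/*
 * Worked example for the linetime encoding above (illustrative numbers,
 * not taken from any specific platform): a 1080p mode with
 * crtc_htotal = 2200 and crtc_clock = 148500 kHz gives
 * 2200 * 1000 * 8 / 148500 ~= 119, i.e. a line time of roughly 14.8 us
 * expressed in 1/8 us units. ips_linetime is the same calculation done
 * against the current CD clock instead of the pixel clock.
 */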

static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
			GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * The punit doesn't take the memory read latency into
		 * account, so we need to add 2us to the various latency
		 * levels we retrieve from it.
		 * - WM0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 * - For levels >= 1, the punit returns 0us latency when the
		 *   level is disabled, so we respect that and don't add 2us
		 *   then.
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
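
/*
 * Illustrative sketch of the gen9 sanitization above (made-up values, not
 * from real hardware): if the punit reported raw latencies {4, 6, 0, 9, ...}
 * for levels 0-3, the loop would produce {6, 8, 0, 0, ...} - levels 0 and 1
 * get the 2us WaWmMemoryReadLatency adjustment, level 2 is disabled (0us),
 * and level 3 is therefore forced to 0 as well, even though the punit
 * reported a non-zero value for it.
 */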

static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv->dev);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool changed;

	/*
	 * The BIOS-provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		  ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		  ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc->active)
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);

	if (crtc->primary->state->fb)
		p->pri.bytes_per_pixel =
			crtc->primary->state->fb->bits_per_pixel / 8;
	else
		p->pri.bytes_per_pixel = 4;

	p->cur.bytes_per_pixel = 4;
	/*
	 * TODO: for now, assume primary and cursor planes are always enabled.
	 * Setting them to false makes the screen flicker.
	 */
	p->pri.enabled = true;
	p->cur.enabled = true;

	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
		return false;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level wm = {};

		ilk_compute_wm_level(dev_priv, level, params, &wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, &wm))
			break;

		pipe_wm->wm[level] = wm;
	}

	return true;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
	    intel_fbc_enabled(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
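
/*
 * Example of the LP -> level mapping above (an illustration, not from the
 * original comments): on HSW/BDW, where ilk_wm_max_level() returns 4, WM4
 * may be enabled, in which case LP1/LP2/LP3 map to levels 1/3/4 and level
 * 2 is skipped. On ILK/SNB/IVB only levels 1-3 exist, so the mapping stays
 * the identity 1/2/3.
 */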

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
Ville Syrjälä8553c182013-12-05 15:51:39 +02002709static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2710 unsigned int dirty)
2711{
Imre Deak820c1982013-12-17 14:46:36 +02002712 struct ilk_wm_values *previous = &dev_priv->wm.hw;
Ville Syrjälä8553c182013-12-05 15:51:39 +02002713 bool changed = false;
2714
2715 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2716 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2717 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2718 changed = true;
2719 }
2720 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2721 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2722 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2723 changed = true;
2724 }
2725 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2726 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2727 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2728 changed = true;
2729 }
2730
2731 /*
2732 * Don't touch WM1S_LP_EN here.
2733 * Doing so could cause underruns.
2734 */
2735
2736 return changed;
2737}
2738
/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   struct drm_crtc *for_crtc,
				   const struct intel_wm_config *config,
				   const struct skl_pipe_wm_parameters *params,
				   struct skl_ddb_entry *alloc /* out */)
{
	struct drm_crtc *crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (!params->active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	nth_active_pipe = 0;
	for_each_crtc(dev, crtc) {
		if (!to_intel_crtc(crtc)->active)
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}

	pipe_size = ddb_size / config->num_pipes_active;
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
	alloc->end = alloc->start + pipe_size;
}
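
/*
 * Worked example (illustrative): on SKL with two active pipes the usable
 * DDB is 896 - 4 = 892 blocks after the bypass carve-out, so each pipe
 * gets 892 / 2 = 446 blocks; the first active pipe covers [0, 446) and
 * the second [446, 892).
 */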

static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
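
/*
 * Decode example (illustrative): a register value of 0x01bd0000 yields
 * start = 0 and end = 0x1bd + 1 = 446, i.e. the half-DDB slice from the
 * example above. The register stores the end inclusively while software
 * keeps it exclusive; an all-zero register leaves end == 0, marking the
 * entry as unused.
 */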

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	for_each_pipe(dev_priv, pipe) {
		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
	}
}

static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
{
	/* for planar format */
	if (p->y_bytes_per_pixel) {
		if (y)	/* y-plane data rate */
			return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
		else	/* uv-plane data rate */
			return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
	}

	/* for packed formats */
	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}
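
/*
 * Worked example (illustrative): a 1920x1080 NV12 plane has a y-plane
 * rate of 1920 * 1080 * 1 = 2073600 and a uv-plane rate of
 * 960 * 540 * 2 = 1036800, so chroma carries half the weight of luma
 * when the DDB is divided proportionally below.
 */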

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192 * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
				 const struct skl_pipe_wm_parameters *params)
{
	unsigned int total_data_rate = 0;
	int plane;

	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
		if (p->y_bytes_per_pixel)
			total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
	}

	return total_data_rate;
}

static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
		      const struct intel_wm_config *config,
		      const struct skl_pipe_wm_parameters *params,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t minimum[I915_MAX_PLANES];
	uint16_t y_minimum[I915_MAX_PLANES];
	unsigned int total_data_rate;
	int plane;

	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
		return;
	}

	cursor_blocks = skl_cursor_allocation(config);
	ddb->cursor[pipe].start = alloc->end - cursor_blocks;
	ddb->cursor[pipe].end = alloc->end;

	alloc_size -= cursor_blocks;
	alloc->end -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_plane(dev_priv, pipe, plane) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		minimum[plane] = 8;
		alloc_size -= minimum[plane];
		y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
		alloc_size -= y_minimum[plane];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);

	start = alloc->start;
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		data_rate = skl_plane_relative_data_rate(p, 0);

		/*
		 * Allocation for a packed format, or the uv-plane part of a
		 * planar format: promote the expression to 64 bits to avoid
		 * overflow; the result is < alloc_size since
		 * data_rate / total_data_rate < 1.
		 */
		plane_blocks = minimum[plane];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		ddb->plane[pipe][plane].start = start;
		ddb->plane[pipe][plane].end = start + plane_blocks;

		start += plane_blocks;

		/* Allocation for the y-plane part of a planar format. */
		if (p->y_bytes_per_pixel) {
			y_data_rate = skl_plane_relative_data_rate(p, 1);
			y_plane_blocks = y_minimum[plane];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);

			ddb->y_plane[pipe][plane].start = start;
			ddb->y_plane[pipe][plane].end = start + y_plane_blocks;

			start += y_plane_blocks;
		}
	}
}
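
/*
 * Worked example (illustrative): one active SKL pipe owns [0, 892). The
 * single-pipe cursor allocation takes 32 blocks, leaving 860. Two enabled
 * packed planes reserve a minimum of 8 blocks each, leaving 844 to split
 * by data rate; if plane 0 fetches twice as many bytes as plane 1, it
 * gets 8 + (844 * 2) / 3 = 570 blocks and plane 1 gets 8 + 844 / 3 = 289,
 * leaving one block unallocated (the FIXME above).
 */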

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 us (the max the punit can encode is 255 and
 * we add 2 us for the read latency), and bytes_per_pixel should always be
 * <= 8, so that should allow pixel_rate up to ~2 GHz, which seems sufficient
 * since the max 2xcdclk is 1350 MHz and the pixel rate should never exceed
 * that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
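
/*
 * Worked example (illustrative): latency is in microseconds and
 * pixel_rate in kHz, so the factor of 1000 reconciles the units, while
 * /512 converts bytes to DDB blocks. With pixel_rate = 148500 (1080p@60),
 * bytes_per_pixel = 4 and latency = 15 us:
 * 15 * 148500 * 4 / 512 = 17402 and DIV_ROUND_UP(17402, 1000) = 18,
 * i.e. 18 blocks are fetched during the latency window.
 */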

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
	      plane_blocks_per_line;

	return ret;
}
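
/*
 * Worked example (illustrative): with latency = 15 us, pixel_rate =
 * 148500 and pipe_htotal = 2200, DIV_ROUND_UP(2227500, 2200000) = 2
 * lines pass during the latency window; a 1920-pixel 4-Bpp linear plane
 * needs DIV_ROUND_UP(7680, 512) = 15 blocks per line, so method 2
 * returns 2 * 15 = 30 blocks. The Y-tile branch rounds over a 4-line
 * tile row first so the per-line block count isn't over-estimated.
 */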

static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
				       const struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	enum pipe pipe = intel_crtc->pipe;

	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
		   sizeof(new_ddb->plane[pipe])))
		return true;

	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
		   sizeof(new_ddb->cursor[pipe])))
		return true;

	return false;
}

static void skl_compute_wm_global_parameters(struct drm_device *dev,
					     struct intel_wm_config *config)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		config->num_pipes_active += to_intel_crtc(crtc)->active;

	/* FIXME: I don't think we need those two global parameters on SKL */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		config->sprites_enabled |= intel_plane->wm.enabled;
		config->sprites_scaled |= intel_plane->wm.scaled;
	}
}

static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
					   struct skl_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	int i = 1; /* Index for sprite planes start */

	p->active = intel_crtc->active;
	if (p->active) {
		p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
		p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);

		fb = crtc->primary->state->fb;
		/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
		if (fb) {
			p->plane[0].enabled = true;
			p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
				drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
			p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
				drm_format_plane_cpp(fb->pixel_format, 0) : 0;
			p->plane[0].tiling = fb->modifier[0];
		} else {
			p->plane[0].enabled = false;
			p->plane[0].bytes_per_pixel = 0;
			p->plane[0].y_bytes_per_pixel = 0;
			p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
		}
		p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
		p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
		p->plane[0].rotation = crtc->primary->state->rotation;

		fb = crtc->cursor->state->fb;
		p->cursor.y_bytes_per_pixel = 0;
		if (fb) {
			p->cursor.enabled = true;
			p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
			p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
			p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
		} else {
			p->cursor.enabled = false;
			p->cursor.bytes_per_pixel = 0;
			p->cursor.horiz_pixels = 64;
			p->cursor.vert_pixels = 64;
		}
	}

	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe &&
		    plane->type == DRM_PLANE_TYPE_OVERLAY)
			p->plane[i++] = intel_plane->wm;
	}
}

static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				 struct skl_pipe_wm_parameters *p,
				 struct intel_plane_wm_parameters *p_params,
				 uint16_t ddb_allocation,
				 int level,
				 uint16_t *out_blocks, /* out */
				 uint8_t *out_lines /* out */)
{
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t bytes_per_pixel;

	if (latency == 0 || !p->active || !p_params->enabled)
		return false;

	bytes_per_pixel = p_params->y_bytes_per_pixel ?
		p_params->y_bytes_per_pixel :
		p_params->bytes_per_pixel;
	method1 = skl_wm_method1(p->pixel_rate,
				 bytes_per_pixel,
				 latency);
	method2 = skl_wm_method2(p->pixel_rate,
				 p->pipe_htotal,
				 p_params->horiz_pixels,
				 bytes_per_pixel,
				 p_params->tiling,
				 latency);

	plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
	    p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;
		if (intel_rotation_90_or_270(p_params->rotation)) {
			switch (p_params->bytes_per_pixel) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
		    p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31)
		return false;

	*out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}
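
/*
 * Selection summary (illustrative, continuing the examples above): a
 * linear plane with ddb_allocation = 446 and plane_blocks_per_line = 15
 * has at least one full line of DDB, so min(method1, method2) =
 * min(18, 30) = 18 is selected; at level 0 that reports 18 + 1 = 19
 * blocks and DIV_ROUND_UP(18, 15) = 2 lines.
 */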

static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct skl_pipe_wm_parameters *p,
				 enum pipe pipe,
				 int level,
				 int num_planes,
				 struct skl_wm_level *result)
{
	uint16_t ddb_blocks;
	int i;

	for (i = 0; i < num_planes; i++) {
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
						p, &p->plane[i],
						ddb_blocks,
						level,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}

	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
	result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
						 ddb_blocks, level,
						 &result->cursor_res_b,
						 &result->cursor_res_l);
}

static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
	if (!to_intel_crtc(crtc)->active)
		return 0;

	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}
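
/*
 * Worked example (illustrative): pipe_htotal = 2200 and pixel_rate =
 * 148500 kHz give DIV_ROUND_UP(17600000, 148500) = 119, the line time
 * in 1/8 us units (one scanline takes roughly 14.8 us here).
 */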

static void skl_compute_transition_wm(struct drm_crtc *crtc,
				      struct skl_pipe_wm_parameters *params,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int i;

	if (!params->active)
		return;

	/* Until we know more, just disable transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		trans_wm->plane_en[i] = false;
	trans_wm->cursor_en = false;
}

static void skl_compute_pipe_wm(struct drm_crtc *crtc,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm_parameters *params,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
				     level, intel_num_planes(intel_crtc),
				     &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);

	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm_parameters *p,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].cursor_res_b;

		if (p_wm->wm[level].cursor_en)
			temp |= PLANE_WM_EN;

		r->cursor[pipe][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.cursor_res_b;
	if (p_wm->trans_wm.cursor_en)
		temp |= PLANE_WM_EN;

	r->cursor_trans[pipe] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}
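
/*
 * Packing example (illustrative, continuing the numbers above): 2 lines
 * and 19 blocks pack as (2 << PLANE_WM_LINES_SHIFT) | 19, with
 * PLANE_WM_EN OR-ed in when the level is usable.
 */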

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (!new->dirty[pipe])
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->cursor[pipe][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);

		for (i = 0; i < intel_num_planes(crtc); i++) {
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);
			skl_ddb_entry_write(dev_priv,
					    PLANE_NV12_BUF_CFG(pipe, i),
					    &new->ddb.y_plane[pipe][i]);
		}

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.cursor[pipe]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included in their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}
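
/*
 * Example (illustrative): an old allocation of [446, 892) shrinking to
 * [500, 892) is "included" and can be flushed in the first pass below,
 * while a move from [446, 892) to [0, 446) is not, and must wait for
 * pass 2 or 3.
 */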

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_pipe_wm_parameters *params,
			       struct intel_wm_config *config,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	skl_compute_wm_pipe_parameters(crtc, params);
	skl_allocate_pipe_ddb(crtc, config, params, ddb);
	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);

	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.skl_active = *pipe_wm;

	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct intel_wm_config *config,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		struct skl_pipe_wm_parameters params = {};
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&params, config,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}

static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_pipe_wm_parameters params = {};
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_pipe_wm pipe_wm = {};
	struct intel_wm_config config = {};

	memset(results, 0, sizeof(*results));

	skl_compute_wm_global_parameters(dev, &config);

	if (!skl_update_pipe_wm(crtc, &params, &config,
				&results->ddb, &pipe_wm))
		return;

	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, &config, results);
	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}
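
/*
 * In summary, skl_update_wm() above: computes the global config,
 * recomputes this pipe's DDB slice and watermarks, propagates the new
 * DDB arrangement to the other active pipes, writes the registers, and
 * finally sequences the flushes so no pipe fetches from blocks it no
 * longer owns.
 */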

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *fb = plane->state->fb;

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;

	/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
	intel_plane->wm.bytes_per_pixel =
		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
		drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
	intel_plane->wm.y_bytes_per_pixel =
		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
		drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;

	/*
	 * Framebuffer can be NULL on plane disable, but it does not
	 * matter for watermarks if we assume no tiling in that case.
	 */
	if (fb)
		intel_plane->wm.tiling = fb->modifier[0];
	intel_plane->wm.rotation = plane->state->rotation;

	skl_update_wm(crtc);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
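
/*
 * Note (illustrative summary of the logic above): the alternative 5/6
 * DDB partitioning is only merged on IVB+ with a single active pipe and
 * sprites enabled; ilk_find_best_result() then keeps whichever merge
 * reaches the higher enabled LP level, preferring the 1/2 split on a
 * tie unless only the 5/6 result has the FBC watermark enabled.
 */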

static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].cursor_en = is_enabled;
			active->wm[level].cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.cursor_en = is_enabled;
			active->trans_wm.cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}

static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc->active)
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
		temp = hw->cursor[pipe][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->cursor_trans[pipe];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}

#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
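
/*
 * Expansion example (illustrative): _FW_WM(tmp, CURSORB) becomes
 * ((tmp & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT); the _VLV variant
 * only swaps in the wider VLV-specific mask for the same shift.
 */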

static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].primary =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].cursor =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}
4035
4036#undef _FW_WM
4037#undef _FW_WM_VLV
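
/*
 * Illustrative note (not from the sources): _FW_WM() above is plain
 * mask-and-shift field extraction; before the #undef,
 * _FW_WM(tmp, CURSORB) expanded to
 *
 *	((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT
 *
 * The DSPHOWM read then ORs one extra high bit per watermark in at
 * bit 8 (bit 9 for the wider SR plane value), extending the fields
 * unpacked from DSPFW1-9.
 */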

void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
		int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
			wm->level = VLV_WM_LEVEL_DDR_DVFS;

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
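
/*
 * Illustrative sketch only (hypothetical helper, not used by the
 * driver): the SR formula from the comment above in code form, with
 * made-up example numbers in the comment inside. All parameter names
 * here are assumptions for the sketch.
 */
static inline unsigned int example_sr_watermark(unsigned int dotclock_khz,
						unsigned int htotal,
						unsigned int width,
						unsigned int cpp,
						unsigned int latency_us)
{
	/* line time in us = htotal / dotclock */
	unsigned int line_time_us = htotal * 1000 / dotclock_khz;

	/*
	 * (trunc(latency / line time) + 1) * surface width * bytes per
	 * pixel, plus the extra 2 entries for clock crossings. E.g. a
	 * 148500 kHz dotclock with htotal 2200 gives a ~14us line time,
	 * so 30us of latency on a 1920 wide, 4 bpp plane yields
	 * (2 + 1) * 1920 * 4 + 2 = 23042 entries.
	 */
	return (latency_us / line_time_us + 1) * width * cpp + 2;
}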

void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}

/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv->dev)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}
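
/*
 * Illustrative example (made-up values): on a pre-gen9 part with
 * max_freq_softlimit == 0x20 and min_freq_softlimit == 0x0b, a request
 * for val == 0x0b packs to
 *
 *	limits = (0x20 << 24) | (0x0b << 16) = 0x200b0000
 *
 * i.e. both the up and the down limit are armed, while any val above
 * the minimum leaves the down-limit field clear. Gen9 uses the same
 * scheme, just with the fields at bits 23 and 14.
 */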

static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}
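
/*
 * Worked example (using the ~1.28us tick noted above; the exact
 * conversion is whatever GT_INTERVAL_FROM_US() does for the platform):
 * in LOW_POWER the up evaluation interval is ei_up = 16000us, i.e.
 * 16000 / 1.28 = 12500 hardware ticks, and the 95% busy threshold is
 * programmed as 16000 * 95 / 100 = 15200us, i.e. 11875 ticks.
 */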

static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_freq_softlimit and max_freq_softlimit) is
 * modified so that we can update the GEN6_RP_INTERRUPT_LIMITS register
 * accordingly. */
static void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}

static void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * If Gfx is idle, then:
 * 1. Forcewake the Media well.
 * 2. Request the idle frequency.
 * 3. Release forcewake of the Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv->dev, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}

void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->mm.busy &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}

void intel_set_rps(struct drm_device *dev, u8 val)
{
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);
}

static void gen9_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * this is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}

static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
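
/*
 * Example of the sanitizing above (hypothetical module option): with
 * i915.enable_rc6=7 the user asks for RC6 | RC6p | RC6pp. On hardware
 * with RC6p support the valid mask is 7 and the value passes through
 * unchanged; without RC6p the mask is INTEL_RC6_ENABLE only, so the
 * option is trimmed to 7 & 1 == 1 and the adjustment is logged.
 */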

int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}

static void gen6_init_rps_frequencies(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t rp_state_cap;
	u32 ddcc_status = 0;
	int ret;

	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev)) {
		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
		ret = sandybridge_pcode_read(dev_priv,
					     HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					     &ddcc_status);
		if (0 == ret)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev)) {
		/* Store the frequency values in 16.66 MHz units, which is
		   the natural hardware unit for SKL */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			dev_priv->rps.min_freq_softlimit =
				max_t(int, dev_priv->rps.efficient_freq,
				      intel_freq_opcode(dev_priv, 450));
		else
			dev_priv->rps.min_freq_softlimit =
				dev_priv->rps.min_freq;
	}
}
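
/*
 * Worked example of the RP_STATE_CAP decode above (made-up fuse value):
 * on a non-Broxton part, rp_state_cap == 0x00040b16 yields
 *
 *	rp0_freq = 0x16 (22 * 50MHz = 1100MHz)
 *	rp1_freq = 0x0b (11 * 50MHz =  550MHz)
 *	min_freq = 0x04 ( 4 * 50MHz =  200MHz)
 *
 * and on Skylake those raw values are then multiplied by
 * GEN9_FREQ_SCALER to express them in 16.66MHz units instead.
 */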

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	gen6_init_rps_frequencies(dev);

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen9_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			"on" : "off");
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				    GEN6_RC_CTL_EI_MODE(1) |
				    rc6_mask);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
	 */
	I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
			GEN9_MEDIA_PG_ENABLE : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN7_RC_CTL_TO_MODE |
				rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				GEN6_RC_CTL_EI_MODE(1) |
				rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
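
/*
 * Worked example for the threshold programming above: the registers
 * count 1.28us units, so 7600000ns / 128 = 59375 ticks, and
 * 59375 * 1.28us = 76ms - which is 90% of the 84.48ms up evaluation
 * interval (66000 ticks * 1.28us), matching the inline comments.
 */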

static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
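
/*
 * Worked example for the pre-Haswell branch above (illustrative values):
 * with max_ia_freq = 3400 MHz, max_gpu_freq = 22 and gpu_freq = 16, the
 * distance from the top is diff = 6, so
 *
 *	ia_freq = 3400 - (6 * 180) / 2 = 2860 MHz
 *
 * which DIV_ROUND_CLOSEST(, 100) turns into 29 - i.e. a 2.9 GHz CPU
 * floor is requested from the PCU for that GPU bin (GPU bins being
 * 50MHz units, 16 == 800MHz).
 */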

void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_CORE_RING_FREQ(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
5118
Ville Syrjälä03af2042014-06-28 02:03:53 +03005119static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
Deepak S2b6b3a02014-05-27 15:59:30 +05305120{
Deepak S095acd52015-01-17 11:05:59 +05305121 struct drm_device *dev = dev_priv->dev;
Deepak S2b6b3a02014-05-27 15:59:30 +05305122 u32 val, rp0;
5123
Deepak S095acd52015-01-17 11:05:59 +05305124 if (dev->pdev->revision >= 0x20) {
5125 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
Deepak S2b6b3a02014-05-27 15:59:30 +05305126
Deepak S095acd52015-01-17 11:05:59 +05305127 switch (INTEL_INFO(dev)->eu_total) {
5128 case 8:
5129 /* (2 * 4) config */
5130 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5131 break;
5132 case 12:
5133 /* (2 * 6) config */
5134 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5135 break;
5136 case 16:
5137 /* (2 * 8) config */
5138 default:
5139 /* Setting (2 * 8) Min RP0 for any other combination */
5140 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5141 break;
5142 }
5143 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5144 } else {
5145 /* For pre-production hardware */
5146 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
5147 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5148 PUNIT_GPU_STATUS_MAX_FREQ_MASK;
5149 }
Deepak S2b6b3a02014-05-27 15:59:30 +05305150 return rp0;
5151}
5152
5153static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5154{
5155 u32 val, rpe;
5156
5157 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5158 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5159
5160 return rpe;
5161}
5162
Deepak S7707df42014-07-12 18:46:14 +05305163static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5164{
Deepak S095acd52015-01-17 11:05:59 +05305165 struct drm_device *dev = dev_priv->dev;
Deepak S7707df42014-07-12 18:46:14 +05305166 u32 val, rp1;
5167
Deepak S095acd52015-01-17 11:05:59 +05305168 if (dev->pdev->revision >= 0x20) {
5169 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5170 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5171 } else {
5172 /* For pre-production hardware */
5173 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5174 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
5175 PUNIT_GPU_STATUS_MAX_FREQ_MASK);
5176 }
Deepak S7707df42014-07-12 18:46:14 +05305177 return rp1;
5178}
5179
Deepak Sf8f2b002014-07-10 13:16:21 +05305180static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5181{
5182 u32 val, rp1;
5183
5184 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5185
5186 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5187
5188 return rp1;
5189}
5190
Ville Syrjälä03af2042014-06-28 02:03:53 +03005191static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
Jesse Barnes0a073b82013-04-17 15:54:58 -07005192{
5193 u32 val, rp0;
5194
Jani Nikula64936252013-05-22 15:36:20 +03005195 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005196
5197 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5198 /* Clamp to max */
5199 rp0 = min_t(u32, rp0, 0xea);
5200
5201 return rp0;
5202}
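/*
 * The min_t() above caps RP0 at code 0xea. As on Cherryview, the value
 * is a raw PUnit frequency code rather than MHz; the clamp presumably
 * keeps a bogus fuse reading from advertising an impossible maximum.
 */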
5203
5204static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5205{
5206 u32 val, rpe;
5207
Jani Nikula64936252013-05-22 15:36:20 +03005208 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005209 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
Jani Nikula64936252013-05-22 15:36:20 +03005210 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005211 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5212
5213 return rpe;
5214}
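/*
 * Note how the RPE code above is fused across two registers: the low
 * five bits come from the FMAX_FUSE_LO field and the remaining high
 * bits from FMAX_FUSE_HI, hence the << 5 when the second read is OR'ed
 * in. For example (field values invented), LO = 0x15 and HI = 0x2
 * combine to (0x2 << 5) | 0x15 = 0x55.
 */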
5215
Ville Syrjälä03af2042014-06-28 02:03:53 +03005216static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
Jesse Barnes0a073b82013-04-17 15:54:58 -07005217{
Jani Nikula64936252013-05-22 15:36:20 +03005218 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
Jesse Barnes0a073b82013-04-17 15:54:58 -07005219}
5220
Imre Deakae484342014-03-31 15:10:44 +03005221/* Check that the pctx buffer wasn't moved under us. */
5222static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5223{
5224 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5225
5226 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5227 dev_priv->vlv_pctx->stolen->start);
5228}
5229
Deepak S38807742014-05-23 21:00:15 +05305230
5231/* Check that the pcbr address is not empty. */
5232static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5233{
5234 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5235
5236 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5237}
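/*
 * Both checks above mask off the low twelve bits of VLV_PCBR (~4095):
 * the power context base is 4 KiB aligned, so those bits never carry
 * address information.
 */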
5238
5239static void cherryview_setup_pctx(struct drm_device *dev)
5240{
5241 struct drm_i915_private *dev_priv = dev->dev_private;
5242 unsigned long pctx_paddr, paddr;
5243 struct i915_gtt *gtt = &dev_priv->gtt;
5244 u32 pcbr;
5245 int pctx_size = 32*1024;
5246
5247 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5248
5249 pcbr = I915_READ(VLV_PCBR);
5250 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
Ville Syrjäläce611ef2014-11-07 21:33:46 +02005251 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
Deepak S38807742014-05-23 21:00:15 +05305252 paddr = (dev_priv->mm.stolen_base +
5253 (gtt->stolen_size - pctx_size));
5254
5255 pctx_paddr = (paddr & (~4095));
5256 I915_WRITE(VLV_PCBR, pctx_paddr);
5257 }
Ville Syrjäläce611ef2014-11-07 21:33:46 +02005258
5259 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
Deepak S38807742014-05-23 21:00:15 +05305260}
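/*
 * Worked example of the fallback placement above (sizes invented): with
 * a 64 MiB stolen region based at 0x7b000000, the 32 KiB power context
 * lands at
 *
 *   paddr = 0x7b000000 + (0x4000000 - 0x8000) = 0x7eff8000
 *
 * i.e. flush against the top of stolen memory, out of the way of
 * allocations made from the bottom.
 */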
5261
Jesse Barnesc9cddff2013-05-08 10:45:13 -07005262static void valleyview_setup_pctx(struct drm_device *dev)
5263{
5264 struct drm_i915_private *dev_priv = dev->dev_private;
5265 struct drm_i915_gem_object *pctx;
5266 unsigned long pctx_paddr;
5267 u32 pcbr;
5268 int pctx_size = 24*1024;
5269
Imre Deak17b0c1f2014-02-11 21:39:06 +02005270 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5271
Jesse Barnesc9cddff2013-05-08 10:45:13 -07005272 pcbr = I915_READ(VLV_PCBR);
5273 if (pcbr) {
5274 /* BIOS set it up already, grab the pre-alloc'd space */
5275 int pcbr_offset;
5276
5277 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5278 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5279 pcbr_offset,
Daniel Vetter190d6cd2013-07-04 13:06:28 +02005280 I915_GTT_OFFSET_NONE,
Jesse Barnesc9cddff2013-05-08 10:45:13 -07005281 pctx_size);
5282 goto out;
5283 }
5284
Ville Syrjäläce611ef2014-11-07 21:33:46 +02005285 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5286
Jesse Barnesc9cddff2013-05-08 10:45:13 -07005287 /*
5288 * From the Gunit register HAS:
5289 * The Gfx driver is expected to program this register and ensure
5290 * proper allocation within Gfx stolen memory. For example, this
5291	 * register should be programmed such that the PCBR range does not
5292 * overlap with other ranges, such as the frame buffer, protected
5293 * memory, or any other relevant ranges.
5294 */
5295 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5296 if (!pctx) {
5297 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5298 return;
5299 }
5300
5301 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5302 I915_WRITE(VLV_PCBR, pctx_paddr);
5303
5304out:
Ville Syrjäläce611ef2014-11-07 21:33:46 +02005305 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
Jesse Barnesc9cddff2013-05-08 10:45:13 -07005306 dev_priv->vlv_pctx = pctx;
5307}
5308
Imre Deakae484342014-03-31 15:10:44 +03005309static void valleyview_cleanup_pctx(struct drm_device *dev)
5310{
5311 struct drm_i915_private *dev_priv = dev->dev_private;
5312
5313 if (WARN_ON(!dev_priv->vlv_pctx))
5314 return;
5315
5316 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
5317 dev_priv->vlv_pctx = NULL;
5318}
5319
Imre Deak4e805192014-04-14 20:24:41 +03005320static void valleyview_init_gt_powersave(struct drm_device *dev)
5321{
5322 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005323 u32 val;
Imre Deak4e805192014-04-14 20:24:41 +03005324
5325 valleyview_setup_pctx(dev);
5326
5327 mutex_lock(&dev_priv->rps.hw_lock);
5328
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005329 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5330 switch ((val >> 6) & 3) {
5331 case 0:
5332 case 1:
5333 dev_priv->mem_freq = 800;
5334 break;
5335 case 2:
5336 dev_priv->mem_freq = 1066;
5337 break;
5338 case 3:
5339 dev_priv->mem_freq = 1333;
5340 break;
5341 }
Ville Syrjälä80b83b62014-11-10 22:55:14 +02005342 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005343
Imre Deak4e805192014-04-14 20:24:41 +03005344 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5345 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5346 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005347 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
Imre Deak4e805192014-04-14 20:24:41 +03005348 dev_priv->rps.max_freq);
5349
5350 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5351 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005352 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
Imre Deak4e805192014-04-14 20:24:41 +03005353 dev_priv->rps.efficient_freq);
5354
Deepak Sf8f2b002014-07-10 13:16:21 +05305355 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5356 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005357 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
Deepak Sf8f2b002014-07-10 13:16:21 +05305358 dev_priv->rps.rp1_freq);
5359
Imre Deak4e805192014-04-14 20:24:41 +03005360 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5361 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005362 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
Imre Deak4e805192014-04-14 20:24:41 +03005363 dev_priv->rps.min_freq);
5364
Chris Wilsonaed242f2015-03-18 09:48:21 +00005365 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5366
Imre Deak4e805192014-04-14 20:24:41 +03005367 /* Preserve min/max settings in case of re-init */
5368 if (dev_priv->rps.max_freq_softlimit == 0)
5369 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5370
5371 if (dev_priv->rps.min_freq_softlimit == 0)
5372 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5373
5374 mutex_unlock(&dev_priv->rps.hw_lock);
5375}
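/*
 * The (val >> 6) & 3 decode above reads bits 7:6 of
 * PUNIT_REG_GPU_FREQ_STS, which fuse-encode the DDR speed (800, 1066 or
 * 1333 MHz). All the rps.*_freq values captured here stay in raw PUnit
 * units; only the debug messages convert them to MHz through
 * intel_gpu_freq().
 */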
5376
Deepak S38807742014-05-23 21:00:15 +05305377static void cherryview_init_gt_powersave(struct drm_device *dev)
5378{
Deepak S2b6b3a02014-05-27 15:59:30 +05305379 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005380 u32 val;
Deepak S2b6b3a02014-05-27 15:59:30 +05305381
Deepak S38807742014-05-23 21:00:15 +05305382 cherryview_setup_pctx(dev);
Deepak S2b6b3a02014-05-27 15:59:30 +05305383
5384 mutex_lock(&dev_priv->rps.hw_lock);
5385
Ville Syrjäläa5805162015-05-26 20:42:30 +03005386 mutex_lock(&dev_priv->sb_lock);
Ville Syrjäläc6e8f392014-11-07 21:33:43 +02005387 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
Ville Syrjäläa5805162015-05-26 20:42:30 +03005388 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläc6e8f392014-11-07 21:33:43 +02005389
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005390 switch ((val >> 2) & 0x7) {
5391 case 0:
5392 case 1:
5393 dev_priv->rps.cz_freq = 200;
5394 dev_priv->mem_freq = 1600;
5395 break;
5396 case 2:
5397 dev_priv->rps.cz_freq = 267;
5398 dev_priv->mem_freq = 1600;
5399 break;
5400 case 3:
5401 dev_priv->rps.cz_freq = 333;
5402 dev_priv->mem_freq = 2000;
5403 break;
5404 case 4:
5405 dev_priv->rps.cz_freq = 320;
5406 dev_priv->mem_freq = 1600;
5407 break;
5408 case 5:
5409 dev_priv->rps.cz_freq = 400;
5410 dev_priv->mem_freq = 1600;
5411 break;
5412 }
Ville Syrjälä80b83b62014-11-10 22:55:14 +02005413 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
Ville Syrjälä2bb25c12014-08-18 14:42:44 +03005414
Deepak S2b6b3a02014-05-27 15:59:30 +05305415 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5416 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5417 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005418 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
Deepak S2b6b3a02014-05-27 15:59:30 +05305419 dev_priv->rps.max_freq);
5420
5421 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5422 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005423 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
Deepak S2b6b3a02014-05-27 15:59:30 +05305424 dev_priv->rps.efficient_freq);
5425
Deepak S7707df42014-07-12 18:46:14 +05305426 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5427 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005428 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
Deepak S7707df42014-07-12 18:46:14 +05305429 dev_priv->rps.rp1_freq);
5430
Deepak S5b7c91b2015-05-09 18:15:46 +05305431 /* PUnit validated range is only [RPe, RP0] */
5432 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
Deepak S2b6b3a02014-05-27 15:59:30 +05305433 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005434 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
Deepak S2b6b3a02014-05-27 15:59:30 +05305435 dev_priv->rps.min_freq);
5436
Ville Syrjälä1c147622014-08-18 14:42:43 +03005437 WARN_ONCE((dev_priv->rps.max_freq |
5438 dev_priv->rps.efficient_freq |
5439 dev_priv->rps.rp1_freq |
5440 dev_priv->rps.min_freq) & 1,
5441 "Odd GPU freq values\n");
5442
Chris Wilsonaed242f2015-03-18 09:48:21 +00005443 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5444
Deepak S2b6b3a02014-05-27 15:59:30 +05305445 /* Preserve min/max settings in case of re-init */
5446 if (dev_priv->rps.max_freq_softlimit == 0)
5447 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5448
5449 if (dev_priv->rps.min_freq_softlimit == 0)
5450 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5451
5452 mutex_unlock(&dev_priv->rps.hw_lock);
Deepak S38807742014-05-23 21:00:15 +05305453}
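/*
 * The WARN_ONCE above records an expectation rather than enforcing one:
 * on Cherryview all the fused frequency codes are supposed to be even,
 * so an odd value most likely means pre-production fusing or a decode
 * error somewhere above.
 */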
5454
Imre Deak4e805192014-04-14 20:24:41 +03005455static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5456{
5457 valleyview_cleanup_pctx(dev);
5458}
5459
Deepak S38807742014-05-23 21:00:15 +05305460static void cherryview_enable_rps(struct drm_device *dev)
5461{
5462 struct drm_i915_private *dev_priv = dev->dev_private;
5463 struct intel_engine_cs *ring;
Deepak S2b6b3a02014-05-27 15:59:30 +05305464 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
Deepak S38807742014-05-23 21:00:15 +05305465 int i;
5466
5467 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5468
5469 gtfifodbg = I915_READ(GTFIFODBG);
5470 if (gtfifodbg) {
5471 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5472 gtfifodbg);
5473 I915_WRITE(GTFIFODBG, gtfifodbg);
5474 }
5475
5476 cherryview_check_pctx(dev_priv);
5477
5478 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5479	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
Mika Kuoppala59bad942015-01-16 11:34:40 +02005480 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Deepak S38807742014-05-23 21:00:15 +05305481
Ville Syrjälä160614a2015-01-19 13:50:47 +02005482 /* Disable RC states. */
5483 I915_WRITE(GEN6_RC_CONTROL, 0);
5484
Deepak S38807742014-05-23 21:00:15 +05305485 /* 2a: Program RC6 thresholds.*/
5486 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5487	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 125000 * 1280ns */
5488 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5489
5490 for_each_ring(ring, dev_priv, i)
5491 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5492 I915_WRITE(GEN6_RC_SLEEP, 0);
5493
Deepak Sf4f71c72015-03-28 15:23:35 +05305494	/* TO threshold set to 500 us (0x186 * 1.28 us) */
5495 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
Deepak S38807742014-05-23 21:00:15 +05305496
5497 /* allows RC6 residency counter to work */
5498 I915_WRITE(VLV_COUNTER_CONTROL,
5499 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5500 VLV_MEDIA_RC6_COUNT_EN |
5501 VLV_RENDER_RC6_COUNT_EN));
5502
5503 /* For now we assume BIOS is allocating and populating the PCBR */
5504 pcbr = I915_READ(VLV_PCBR);
5505
Deepak S38807742014-05-23 21:00:15 +05305506 /* 3: Enable RC6 */
5507 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5508 (pcbr >> VLV_PCBR_ADDR_SHIFT))
Ville Syrjäläaf5a75a2015-01-19 13:50:50 +02005509 rc6_mode = GEN7_RC_CTL_TO_MODE;
Deepak S38807742014-05-23 21:00:15 +05305510
5511 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5512
Deepak S2b6b3a02014-05-27 15:59:30 +05305513	/* 4: Program defaults and thresholds for RPS */
Ville Syrjälä3cbdb482015-01-19 13:50:49 +02005514 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
Deepak S2b6b3a02014-05-27 15:59:30 +05305515 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5516 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5517 I915_WRITE(GEN6_RP_UP_EI, 66000);
5518 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5519
5520 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5521
5522 /* 5: Enable RPS */
5523 I915_WRITE(GEN6_RP_CONTROL,
5524 GEN6_RP_MEDIA_HW_NORMAL_MODE |
Ville Syrjäläeb973a52015-01-21 19:37:59 +02005525 GEN6_RP_MEDIA_IS_GFX |
Deepak S2b6b3a02014-05-27 15:59:30 +05305526 GEN6_RP_ENABLE |
5527 GEN6_RP_UP_BUSY_AVG |
5528 GEN6_RP_DOWN_IDLE_AVG);
5529
Deepak S3ef62342015-04-29 08:36:24 +05305530 /* Setting Fixed Bias */
5531 val = VLV_OVERRIDE_EN |
5532 VLV_SOC_TDP_EN |
5533 CHV_BIAS_CPU_50_SOC_50;
5534 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5535
Deepak S2b6b3a02014-05-27 15:59:30 +05305536 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5537
Ville Syrjälä8d40c3a2014-11-07 21:33:45 +02005538 /* RPS code assumes GPLL is used */
5539 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5540
Ville Syrjäläc8e96272014-11-07 21:33:44 +02005541 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
Deepak S2b6b3a02014-05-27 15:59:30 +05305542 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5543
5544 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5545 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005546 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
Deepak S2b6b3a02014-05-27 15:59:30 +05305547 dev_priv->rps.cur_freq);
5548
5549 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005550 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
Deepak S2b6b3a02014-05-27 15:59:30 +05305551 dev_priv->rps.efficient_freq);
5552
5553 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5554
Mika Kuoppala59bad942015-01-16 11:34:40 +02005555 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Deepak S38807742014-05-23 21:00:15 +05305556}
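/*
 * The numbered comments in cherryview_enable_rps() above (1a/1b, 2a, 3,
 * 4, 5) mirror the documented enabling sequence: grab forcewake,
 * program the RC6 thresholds, turn RC6 on, program the RPS thresholds,
 * then enable RPS and drop forcewake again.
 */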
5557
Jesse Barnes0a073b82013-04-17 15:54:58 -07005558static void valleyview_enable_rps(struct drm_device *dev)
5559{
5560 struct drm_i915_private *dev_priv = dev->dev_private;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01005561 struct intel_engine_cs *ring;
Ben Widawsky2a5913a2014-03-19 18:31:13 -07005562 u32 gtfifodbg, val, rc6_mode = 0;
Jesse Barnes0a073b82013-04-17 15:54:58 -07005563 int i;
5564
5565 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5566
Imre Deakae484342014-03-31 15:10:44 +03005567 valleyview_check_pctx(dev_priv);
5568
Jesse Barnes0a073b82013-04-17 15:54:58 -07005569 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
Jesse Barnesf7d85c12013-09-27 10:40:54 -07005570 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5571 gtfifodbg);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005572 I915_WRITE(GTFIFODBG, gtfifodbg);
5573 }
5574
Deepak Sc8d9a592013-11-23 14:55:42 +05305575	/* If VLV, forcewake all wells, else redirect to the regular path */
Mika Kuoppala59bad942015-01-16 11:34:40 +02005576 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005577
Ville Syrjälä160614a2015-01-19 13:50:47 +02005578 /* Disable RC states. */
5579 I915_WRITE(GEN6_RC_CONTROL, 0);
5580
Ville Syrjäläcad725f2015-01-19 13:50:48 +02005581 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005582 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5583 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5584 I915_WRITE(GEN6_RP_UP_EI, 66000);
5585 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5586
5587 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5588
5589 I915_WRITE(GEN6_RP_CONTROL,
5590 GEN6_RP_MEDIA_TURBO |
5591 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5592 GEN6_RP_MEDIA_IS_GFX |
5593 GEN6_RP_ENABLE |
5594 GEN6_RP_UP_BUSY_AVG |
5595 GEN6_RP_DOWN_IDLE_CONT);
5596
5597 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5598 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5599 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5600
5601 for_each_ring(ring, dev_priv, i)
5602 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5603
Jesse Barnes2f0aa302013-11-15 09:32:11 -08005604 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005605
5606 /* allows RC6 residency counter to work */
Jesse Barnes49798eb2013-09-26 17:55:57 -07005607 I915_WRITE(VLV_COUNTER_CONTROL,
Deepak S31685c22014-07-03 17:33:01 -04005608 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5609 VLV_RENDER_RC0_COUNT_EN |
Jesse Barnes49798eb2013-09-26 17:55:57 -07005610 VLV_MEDIA_RC6_COUNT_EN |
5611 VLV_RENDER_RC6_COUNT_EN));
Deepak S31685c22014-07-03 17:33:01 -04005612
Jesse Barnesa2b23fe2013-09-19 09:33:13 -07005613 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
Jesse Barnes6b88f292013-11-15 09:32:12 -08005614 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
Ben Widawskydc39fff2013-10-18 12:32:07 -07005615
5616 intel_print_rc6_info(dev, rc6_mode);
5617
Jesse Barnesa2b23fe2013-09-19 09:33:13 -07005618 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005619
Deepak S3ef62342015-04-29 08:36:24 +05305620 /* Setting Fixed Bias */
5621 val = VLV_OVERRIDE_EN |
5622 VLV_SOC_TDP_EN |
5623 VLV_BIAS_CPU_125_SOC_875;
5624 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5625
Jani Nikula64936252013-05-22 15:36:20 +03005626 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005627
Ville Syrjälä8d40c3a2014-11-07 21:33:45 +02005628 /* RPS code assumes GPLL is used */
5629 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5630
Ville Syrjäläc8e96272014-11-07 21:33:44 +02005631 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
Jesse Barnes0a073b82013-04-17 15:54:58 -07005632 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5633
Ben Widawskyb39fb292014-03-19 18:31:11 -07005634 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
Ville Syrjälä73008b92013-06-25 19:21:01 +03005635 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005636 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
Ben Widawskyb39fb292014-03-19 18:31:11 -07005637 dev_priv->rps.cur_freq);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005638
Ville Syrjälä73008b92013-06-25 19:21:01 +03005639 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02005640 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
Ben Widawskyb39fb292014-03-19 18:31:11 -07005641 dev_priv->rps.efficient_freq);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005642
Ben Widawskyb39fb292014-03-19 18:31:11 -07005643 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005644
Mika Kuoppala59bad942015-01-16 11:34:40 +02005645 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005646}
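/*
 * Note the different TDP bias programmed above compared with the
 * Cherryview path: Valleyview asks for a 12.5%/87.5% CPU/SoC split
 * (VLV_BIAS_CPU_125_SOC_875) where Cherryview uses 50/50, but both
 * write the same VLV_TURBO_SOC_OVERRIDE override register.
 */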
5647
Eugeni Dodonovdde18882012-04-18 15:29:24 -03005648static unsigned long intel_pxfreq(u32 vidfreq)
5649{
5650 unsigned long freq;
5651 int div = (vidfreq & 0x3f0000) >> 16;
5652 int post = (vidfreq & 0x3000) >> 12;
5653 int pre = (vidfreq & 0x7);
5654
5655 if (!pre)
5656 return 0;
5657
5658 freq = ((div * 133333) / ((1<<post) * pre));
5659
5660 return freq;
5661}
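/*
 * Worked example for intel_pxfreq() (input invented): vidfreq =
 * 0x00121003 decodes to div = 0x12 (18), post = 1, pre = 3, giving
 *
 *   freq = 18 * 133333 / ((1 << 1) * 3) = 399999
 *
 * i.e. roughly 400 MHz, assuming the 133333 constant is the 133.33 MHz
 * reference clock expressed in kHz.
 */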
5662
Daniel Vettereb48eb02012-04-26 23:28:12 +02005663static const struct cparams {
5664 u16 i;
5665 u16 t;
5666 u16 m;
5667 u16 c;
5668} cparams[] = {
5669 { 1, 1333, 301, 28664 },
5670 { 1, 1066, 294, 24460 },
5671 { 1, 800, 294, 25192 },
5672 { 0, 1333, 276, 27605 },
5673 { 0, 1066, 276, 27605 },
5674 { 0, 800, 231, 23784 },
5675};
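/*
 * Each cparams row maps a fuse configuration (i, t) -- matched against
 * ips.c_m and ips.r_t below -- to the slope m and intercept c of a
 * linear chipset power model; __i915_chipset_val() picks the matching
 * row and evaluates roughly power = (m * count_rate + c) / 10.
 */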
5676
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005677static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02005678{
5679 u64 total_count, diff, ret;
5680 u32 count1, count2, count3, m = 0, c = 0;
5681 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5682 int i;
5683
Daniel Vetter02d71952012-08-09 16:44:54 +02005684 assert_spin_locked(&mchdev_lock);
5685
Daniel Vetter20e4d402012-08-08 23:35:39 +02005686 diff1 = now - dev_priv->ips.last_time1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005687
5688 /* Prevent division-by-zero if we are asking too fast.
5689 * Also, we don't get interesting results if we are polling
5690 * faster than once in 10ms, so just return the saved value
5691 * in such cases.
5692 */
5693 if (diff1 <= 10)
Daniel Vetter20e4d402012-08-08 23:35:39 +02005694 return dev_priv->ips.chipset_power;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005695
5696 count1 = I915_READ(DMIEC);
5697 count2 = I915_READ(DDREC);
5698 count3 = I915_READ(CSIEC);
5699
5700 total_count = count1 + count2 + count3;
5701
5702 /* FIXME: handle per-counter overflow */
Daniel Vetter20e4d402012-08-08 23:35:39 +02005703 if (total_count < dev_priv->ips.last_count1) {
5704 diff = ~0UL - dev_priv->ips.last_count1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005705 diff += total_count;
5706 } else {
Daniel Vetter20e4d402012-08-08 23:35:39 +02005707 diff = total_count - dev_priv->ips.last_count1;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005708 }
5709
5710 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
Daniel Vetter20e4d402012-08-08 23:35:39 +02005711 if (cparams[i].i == dev_priv->ips.c_m &&
5712 cparams[i].t == dev_priv->ips.r_t) {
Daniel Vettereb48eb02012-04-26 23:28:12 +02005713 m = cparams[i].m;
5714 c = cparams[i].c;
5715 break;
5716 }
5717 }
5718
5719 diff = div_u64(diff, diff1);
5720 ret = ((m * diff) + c);
5721 ret = div_u64(ret, 10);
5722
Daniel Vetter20e4d402012-08-08 23:35:39 +02005723 dev_priv->ips.last_count1 = total_count;
5724 dev_priv->ips.last_time1 = now;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005725
Daniel Vetter20e4d402012-08-08 23:35:39 +02005726 dev_priv->ips.chipset_power = ret;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005727
5728 return ret;
5729}
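/*
 * Two details worth noting in __i915_chipset_val() above: the 10 ms
 * rate limit exists because the energy counters barely move between
 * faster polls, and the wrap handling assumes at most one overflow of
 * the summed counters per sampling interval (the FIXME notes that
 * per-counter wraps are still folded into a single sum).
 */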
5730
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005731unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5732{
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005733 struct drm_device *dev = dev_priv->dev;
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005734 unsigned long val;
5735
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005736 if (INTEL_INFO(dev)->gen != 5)
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005737 return 0;
5738
5739 spin_lock_irq(&mchdev_lock);
5740
5741 val = __i915_chipset_val(dev_priv);
5742
5743 spin_unlock_irq(&mchdev_lock);
5744
5745 return val;
5746}
5747
Daniel Vettereb48eb02012-04-26 23:28:12 +02005748unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5749{
5750 unsigned long m, x, b;
5751 u32 tsfs;
5752
5753 tsfs = I915_READ(TSFS);
5754
5755 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5756 x = I915_READ8(TR1);
5757
5758 b = tsfs & TSFS_INTR_MASK;
5759
5760 return ((m * x) / 127) - b;
5761}
5762
Mika Kuoppalad972d6e2014-12-01 18:01:05 +02005763static int _pxvid_to_vd(u8 pxvid)
5764{
5765 if (pxvid == 0)
5766 return 0;
5767
5768 if (pxvid >= 8 && pxvid < 31)
5769 pxvid = 31;
5770
5771 return (pxvid + 2) * 125;
5772}
5773
5774static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
Daniel Vettereb48eb02012-04-26 23:28:12 +02005775{
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005776 struct drm_device *dev = dev_priv->dev;
Mika Kuoppalad972d6e2014-12-01 18:01:05 +02005777 const int vd = _pxvid_to_vd(pxvid);
5778 const int vm = vd - 1125;
5779
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005780 if (INTEL_INFO(dev)->is_mobile)
Mika Kuoppalad972d6e2014-12-01 18:01:05 +02005781 return vm > 0 ? vm : 0;
5782
5783 return vd;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005784}
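/*
 * Worked example for the PXVID conversion above (input invented):
 * pxvid = 17 falls inside [8, 31) and is clamped to 31, so
 * _pxvid_to_vd() returns (31 + 2) * 125 = 4125; on a mobile part
 * pvid_to_extvid() then subtracts 1125, yielding 3000. The scale is
 * whatever the ips.corr arithmetic further down expects.
 */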
5785
Daniel Vetter02d71952012-08-09 16:44:54 +02005786static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02005787{
Thomas Gleixner5ed0bdf2014-07-16 21:05:06 +00005788 u64 now, diff, diffms;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005789 u32 count;
5790
Daniel Vetter02d71952012-08-09 16:44:54 +02005791 assert_spin_locked(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005792
Thomas Gleixner5ed0bdf2014-07-16 21:05:06 +00005793 now = ktime_get_raw_ns();
5794 diffms = now - dev_priv->ips.last_time2;
5795 do_div(diffms, NSEC_PER_MSEC);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005796
5797 /* Don't divide by 0 */
Daniel Vettereb48eb02012-04-26 23:28:12 +02005798 if (!diffms)
5799 return;
5800
5801 count = I915_READ(GFXEC);
5802
Daniel Vetter20e4d402012-08-08 23:35:39 +02005803 if (count < dev_priv->ips.last_count2) {
5804 diff = ~0UL - dev_priv->ips.last_count2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005805 diff += count;
5806 } else {
Daniel Vetter20e4d402012-08-08 23:35:39 +02005807 diff = count - dev_priv->ips.last_count2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005808 }
5809
Daniel Vetter20e4d402012-08-08 23:35:39 +02005810 dev_priv->ips.last_count2 = count;
5811 dev_priv->ips.last_time2 = now;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005812
5813 /* More magic constants... */
5814 diff = diff * 1181;
5815 diff = div_u64(diff, diffms * 10);
Daniel Vetter20e4d402012-08-08 23:35:39 +02005816 dev_priv->ips.gfx_power = diff;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005817}
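/*
 * The arithmetic above turns a raw energy counter delta into an average
 * power figure: diff * 1181 scales counter units (another of the
 * empirical constants in this file) and dividing by (diffms * 10)
 * converts the interval total into a rate. The result is cached in
 * ips.gfx_power for __i915_gfx_val() to build on.
 */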
5818
Daniel Vetter02d71952012-08-09 16:44:54 +02005819void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5820{
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005821 struct drm_device *dev = dev_priv->dev;
5822
5823 if (INTEL_INFO(dev)->gen != 5)
Daniel Vetter02d71952012-08-09 16:44:54 +02005824 return;
5825
Daniel Vetter92703882012-08-09 16:46:01 +02005826 spin_lock_irq(&mchdev_lock);
Daniel Vetter02d71952012-08-09 16:44:54 +02005827
5828 __i915_update_gfx_val(dev_priv);
5829
Daniel Vetter92703882012-08-09 16:46:01 +02005830 spin_unlock_irq(&mchdev_lock);
Daniel Vetter02d71952012-08-09 16:44:54 +02005831}
5832
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005833static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
Daniel Vettereb48eb02012-04-26 23:28:12 +02005834{
5835 unsigned long t, corr, state1, corr2, state2;
5836 u32 pxvid, ext_v;
5837
Daniel Vetter02d71952012-08-09 16:44:54 +02005838 assert_spin_locked(&mchdev_lock);
5839
Ben Widawskyb39fb292014-03-19 18:31:11 -07005840 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
Daniel Vettereb48eb02012-04-26 23:28:12 +02005841 pxvid = (pxvid >> 24) & 0x7f;
5842 ext_v = pvid_to_extvid(dev_priv, pxvid);
5843
5844 state1 = ext_v;
5845
5846 t = i915_mch_val(dev_priv);
5847
5848 /* Revel in the empirically derived constants */
5849
5850 /* Correction factor in 1/100000 units */
5851 if (t > 80)
5852 corr = ((t * 2349) + 135940);
5853 else if (t >= 50)
5854 corr = ((t * 964) + 29317);
5855 else /* < 50 */
5856 corr = ((t * 301) + 1004);
5857
5858 corr = corr * ((150142 * state1) / 10000 - 78642);
5859 corr /= 100000;
Daniel Vetter20e4d402012-08-08 23:35:39 +02005860 corr2 = (corr * dev_priv->ips.corr);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005861
5862 state2 = (corr2 * state1) / 10000;
5863 state2 /= 100; /* convert to mW */
5864
Daniel Vetter02d71952012-08-09 16:44:54 +02005865 __i915_update_gfx_val(dev_priv);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005866
Daniel Vetter20e4d402012-08-08 23:35:39 +02005867 return dev_priv->ips.gfx_power + state2;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005868}
5869
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005870unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5871{
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005872 struct drm_device *dev = dev_priv->dev;
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005873 unsigned long val;
5874
Damien Lespiau3d13ef22014-02-07 19:12:47 +00005875 if (INTEL_INFO(dev)->gen != 5)
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005876 return 0;
5877
5878 spin_lock_irq(&mchdev_lock);
5879
5880 val = __i915_gfx_val(dev_priv);
5881
5882 spin_unlock_irq(&mchdev_lock);
5883
5884 return val;
5885}
5886
Daniel Vettereb48eb02012-04-26 23:28:12 +02005887/**
5888 * i915_read_mch_val - return value for IPS use
5889 *
5890 * Calculate and return a value for the IPS driver to use when deciding whether
5891 * we have thermal and power headroom to increase CPU or GPU power budget.
5892 */
5893unsigned long i915_read_mch_val(void)
5894{
5895 struct drm_i915_private *dev_priv;
5896 unsigned long chipset_val, graphics_val, ret = 0;
5897
Daniel Vetter92703882012-08-09 16:46:01 +02005898 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005899 if (!i915_mch_dev)
5900 goto out_unlock;
5901 dev_priv = i915_mch_dev;
5902
Chris Wilsonf531dcb2012-09-25 10:16:12 +01005903 chipset_val = __i915_chipset_val(dev_priv);
5904 graphics_val = __i915_gfx_val(dev_priv);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005905
5906 ret = chipset_val + graphics_val;
5907
5908out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02005909 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005910
5911 return ret;
5912}
5913EXPORT_SYMBOL_GPL(i915_read_mch_val);
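/*
 * All the exported IPS hooks below follow the same pattern: take
 * mchdev_lock, bail out if i915_mch_dev has not been registered yet
 * (intel_ips can probe before i915), do the work, unlock. A minimal
 * sketch of how the intel_ips side might drive them, assuming nothing
 * beyond these exported symbols ("threshold" is a made-up name; the
 * real policy lives in intel_ips.c):
 *
 *   unsigned long budget = i915_read_mch_val();
 *
 *   if (budget > threshold && i915_gpu_busy())
 *           i915_gpu_raise();   // thermal headroom: allow higher freq
 *   else
 *           i915_gpu_lower();   // near the limit: pull the cap down
 */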
5914
5915/**
5916 * i915_gpu_raise - raise GPU frequency limit
5917 *
5918 * Raise the limit; IPS indicates we have thermal headroom.
5919 */
5920bool i915_gpu_raise(void)
5921{
5922 struct drm_i915_private *dev_priv;
5923 bool ret = true;
5924
Daniel Vetter92703882012-08-09 16:46:01 +02005925 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005926 if (!i915_mch_dev) {
5927 ret = false;
5928 goto out_unlock;
5929 }
5930 dev_priv = i915_mch_dev;
5931
Daniel Vetter20e4d402012-08-08 23:35:39 +02005932 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5933 dev_priv->ips.max_delay--;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005934
5935out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02005936 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005937
5938 return ret;
5939}
5940EXPORT_SYMBOL_GPL(i915_gpu_raise);
5941
5942/**
5943 * i915_gpu_lower - lower GPU frequency limit
5944 *
5945 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5946 * frequency maximum.
5947 */
5948bool i915_gpu_lower(void)
5949{
5950 struct drm_i915_private *dev_priv;
5951 bool ret = true;
5952
Daniel Vetter92703882012-08-09 16:46:01 +02005953 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005954 if (!i915_mch_dev) {
5955 ret = false;
5956 goto out_unlock;
5957 }
5958 dev_priv = i915_mch_dev;
5959
Daniel Vetter20e4d402012-08-08 23:35:39 +02005960 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
5961 dev_priv->ips.max_delay++;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005962
5963out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02005964 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005965
5966 return ret;
5967}
5968EXPORT_SYMBOL_GPL(i915_gpu_lower);
5969
5970/**
5971 * i915_gpu_busy - indicate GPU business to IPS
5972 *
5973 * Tell the IPS driver whether or not the GPU is busy.
5974 */
5975bool i915_gpu_busy(void)
5976{
5977 struct drm_i915_private *dev_priv;
Oscar Mateoa4872ba2014-05-22 14:13:33 +01005978 struct intel_engine_cs *ring;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005979 bool ret = false;
Chris Wilsonf047e392012-07-21 12:31:41 +01005980 int i;
Daniel Vettereb48eb02012-04-26 23:28:12 +02005981
Daniel Vetter92703882012-08-09 16:46:01 +02005982 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005983 if (!i915_mch_dev)
5984 goto out_unlock;
5985 dev_priv = i915_mch_dev;
5986
Chris Wilsonf047e392012-07-21 12:31:41 +01005987 for_each_ring(ring, dev_priv, i)
5988 ret |= !list_empty(&ring->request_list);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005989
5990out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02005991 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02005992
5993 return ret;
5994}
5995EXPORT_SYMBOL_GPL(i915_gpu_busy);
5996
5997/**
5998 * i915_gpu_turbo_disable - disable graphics turbo
5999 *
6000 * Disable graphics turbo by resetting the max frequency and setting the
6001 * current frequency to the default.
6002 */
6003bool i915_gpu_turbo_disable(void)
6004{
6005 struct drm_i915_private *dev_priv;
6006 bool ret = true;
6007
Daniel Vetter92703882012-08-09 16:46:01 +02006008 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006009 if (!i915_mch_dev) {
6010 ret = false;
6011 goto out_unlock;
6012 }
6013 dev_priv = i915_mch_dev;
6014
Daniel Vetter20e4d402012-08-08 23:35:39 +02006015 dev_priv->ips.max_delay = dev_priv->ips.fstart;
Daniel Vettereb48eb02012-04-26 23:28:12 +02006016
Daniel Vetter20e4d402012-08-08 23:35:39 +02006017 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
Daniel Vettereb48eb02012-04-26 23:28:12 +02006018 ret = false;
6019
6020out_unlock:
Daniel Vetter92703882012-08-09 16:46:01 +02006021 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006022
6023 return ret;
6024}
6025EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6026
6027/**
6028 * Tells the intel_ips driver that the i915 driver is now loaded, if
6029 * IPS got loaded first.
6030 *
6031 * This awkward dance is so that neither module has to depend on the
6032 * other in order for IPS to do the appropriate communication of
6033 * GPU turbo limits to i915.
6034 */
6035static void
6036ips_ping_for_i915_load(void)
6037{
6038 void (*link)(void);
6039
6040 link = symbol_get(ips_link_to_i915_driver);
6041 if (link) {
6042 link();
6043 symbol_put(ips_link_to_i915_driver);
6044 }
6045}
6046
6047void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6048{
Daniel Vetter02d71952012-08-09 16:44:54 +02006049 /* We only register the i915 ips part with intel-ips once everything is
6050 * set up, to avoid intel-ips sneaking in and reading bogus values. */
Daniel Vetter92703882012-08-09 16:46:01 +02006051 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006052 i915_mch_dev = dev_priv;
Daniel Vetter92703882012-08-09 16:46:01 +02006053 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006054
6055 ips_ping_for_i915_load();
6056}
6057
6058void intel_gpu_ips_teardown(void)
6059{
Daniel Vetter92703882012-08-09 16:46:01 +02006060 spin_lock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006061 i915_mch_dev = NULL;
Daniel Vetter92703882012-08-09 16:46:01 +02006062 spin_unlock_irq(&mchdev_lock);
Daniel Vettereb48eb02012-04-26 23:28:12 +02006063}
Deepak S76c3552f2014-01-30 23:08:16 +05306064
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006065static void intel_init_emon(struct drm_device *dev)
Eugeni Dodonovdde18882012-04-18 15:29:24 -03006066{
6067 struct drm_i915_private *dev_priv = dev->dev_private;
6068 u32 lcfuse;
6069 u8 pxw[16];
6070 int i;
6071
6072	/* Disable PMON while we program the event weights */
6073 I915_WRITE(ECR, 0);
6074 POSTING_READ(ECR);
6075
6076 /* Program energy weights for various events */
6077 I915_WRITE(SDEW, 0x15040d00);
6078 I915_WRITE(CSIEW0, 0x007f0000);
6079 I915_WRITE(CSIEW1, 0x1e220004);
6080 I915_WRITE(CSIEW2, 0x04000004);
6081
6082 for (i = 0; i < 5; i++)
6083 I915_WRITE(PEW + (i * 4), 0);
6084 for (i = 0; i < 3; i++)
6085 I915_WRITE(DEW + (i * 4), 0);
6086
6087 /* Program P-state weights to account for frequency power adjustment */
6088 for (i = 0; i < 16; i++) {
6089 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
6090 unsigned long freq = intel_pxfreq(pxvidfreq);
6091 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6092 PXVFREQ_PX_SHIFT;
6093 unsigned long val;
6094
6095 val = vid * vid;
6096 val *= (freq / 1000);
6097 val *= 255;
6098 val /= (127*127*900);
6099 if (val > 0xff)
6100 DRM_ERROR("bad pxval: %ld\n", val);
6101 pxw[i] = val;
6102 }
6103 /* Render standby states get 0 weight */
6104 pxw[14] = 0;
6105 pxw[15] = 0;
6106
6107 for (i = 0; i < 4; i++) {
6108 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6109 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6110 I915_WRITE(PXW + (i * 4), val);
6111 }
6112
6113 /* Adjust magic regs to magic values (more experimental results) */
6114 I915_WRITE(OGW0, 0);
6115 I915_WRITE(OGW1, 0);
6116 I915_WRITE(EG0, 0x00007f00);
6117 I915_WRITE(EG1, 0x0000000e);
6118 I915_WRITE(EG2, 0x000e0000);
6119 I915_WRITE(EG3, 0x68000300);
6120 I915_WRITE(EG4, 0x42000000);
6121 I915_WRITE(EG5, 0x00140031);
6122 I915_WRITE(EG6, 0);
6123 I915_WRITE(EG7, 0);
6124
6125 for (i = 0; i < 8; i++)
6126 I915_WRITE(PXWL + (i * 4), 0);
6127
6128 /* Enable PMON + select events */
6129 I915_WRITE(ECR, 0x80000019);
6130
6131 lcfuse = I915_READ(LCFUSE02);
6132
Daniel Vetter20e4d402012-08-08 23:35:39 +02006133 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
Eugeni Dodonovdde18882012-04-18 15:29:24 -03006134}
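/*
 * Worked example of the P-state weight loop above (register value
 * invented, and assuming intel_pxfreq() returns kHz): with vid = 46 and
 * freq = 400000,
 *
 *   val = 46 * 46 * (400000 / 1000) * 255 / (127 * 127 * 900)
 *       = 215832000 / 14516100 = 14
 *
 * so pxw[] entries are small 8-bit weights; anything over 0xff trips
 * the "bad pxval" error above.
 */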
6135
Imre Deakae484342014-03-31 15:10:44 +03006136void intel_init_gt_powersave(struct drm_device *dev)
6137{
Imre Deake6069ca2014-04-18 16:01:02 +03006138 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
6139
Deepak S38807742014-05-23 21:00:15 +05306140 if (IS_CHERRYVIEW(dev))
6141 cherryview_init_gt_powersave(dev);
6142 else if (IS_VALLEYVIEW(dev))
Imre Deak4e805192014-04-14 20:24:41 +03006143 valleyview_init_gt_powersave(dev);
Imre Deakae484342014-03-31 15:10:44 +03006144}
6145
6146void intel_cleanup_gt_powersave(struct drm_device *dev)
6147{
Deepak S38807742014-05-23 21:00:15 +05306148 if (IS_CHERRYVIEW(dev))
6149 return;
6150 else if (IS_VALLEYVIEW(dev))
Imre Deak4e805192014-04-14 20:24:41 +03006151 valleyview_cleanup_gt_powersave(dev);
Imre Deakae484342014-03-31 15:10:44 +03006152}
6153
Imre Deakdbea3ce2014-12-15 18:59:28 +02006154static void gen6_suspend_rps(struct drm_device *dev)
6155{
6156 struct drm_i915_private *dev_priv = dev->dev_private;
6157
6158 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6159
Akash Goel4c2a8892015-03-06 11:07:24 +05306160 gen6_disable_rps_interrupts(dev);
Imre Deakdbea3ce2014-12-15 18:59:28 +02006161}
6162
Jesse Barnes156c7ca2014-06-12 08:35:45 -07006163/**
6164 * intel_suspend_gt_powersave - suspend PM work and helper threads
6165 * @dev: drm device
6166 *
6167 * We don't want to disable RC6 or other features here, we just want
6168 * to make sure any work we've queued has finished and won't bother
6169 * us while we're suspended.
6170 */
6171void intel_suspend_gt_powersave(struct drm_device *dev)
6172{
6173 struct drm_i915_private *dev_priv = dev->dev_private;
6174
Imre Deakd4d70aa2014-11-19 15:30:04 +02006175 if (INTEL_INFO(dev)->gen < 6)
6176 return;
6177
Imre Deakdbea3ce2014-12-15 18:59:28 +02006178 gen6_suspend_rps(dev);
Deepak Sb47adc12014-06-20 20:03:02 +05306179
6180 /* Force GPU to min freq during suspend */
6181 gen6_rps_idle(dev_priv);
Jesse Barnes156c7ca2014-06-12 08:35:45 -07006182}
6183
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006184void intel_disable_gt_powersave(struct drm_device *dev)
6185{
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006186 struct drm_i915_private *dev_priv = dev->dev_private;
6187
Daniel Vetter930ebb42012-06-29 23:32:16 +02006188 if (IS_IRONLAKE_M(dev)) {
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006189 ironlake_disable_drps(dev);
Deepak S38807742014-05-23 21:00:15 +05306190 } else if (INTEL_INFO(dev)->gen >= 6) {
Daniel Vetter10d8d362014-06-12 17:48:52 +02006191 intel_suspend_gt_powersave(dev);
Imre Deake4948372014-05-12 18:35:04 +03006192
Jesse Barnes4fc688c2012-11-02 11:14:01 -07006193 mutex_lock(&dev_priv->rps.hw_lock);
Zhe Wang20e49362014-11-04 17:07:05 +00006194 if (INTEL_INFO(dev)->gen >= 9)
6195 gen9_disable_rps(dev);
6196 else if (IS_CHERRYVIEW(dev))
Deepak S38807742014-05-23 21:00:15 +05306197 cherryview_disable_rps(dev);
6198 else if (IS_VALLEYVIEW(dev))
Jesse Barnesd20d4f02013-04-23 10:09:28 -07006199 valleyview_disable_rps(dev);
6200 else
6201 gen6_disable_rps(dev);
Imre Deake5347702014-11-19 15:30:02 +02006202
Chris Wilsonc0951f02013-10-10 21:58:50 +01006203 dev_priv->rps.enabled = false;
Jesse Barnes4fc688c2012-11-02 11:14:01 -07006204 mutex_unlock(&dev_priv->rps.hw_lock);
Daniel Vetter930ebb42012-06-29 23:32:16 +02006205 }
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006206}
6207
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006208static void intel_gen6_powersave_work(struct work_struct *work)
6209{
6210 struct drm_i915_private *dev_priv =
6211 container_of(work, struct drm_i915_private,
6212 rps.delayed_resume_work.work);
6213 struct drm_device *dev = dev_priv->dev;
6214
Jesse Barnes4fc688c2012-11-02 11:14:01 -07006215 mutex_lock(&dev_priv->rps.hw_lock);
Jesse Barnes0a073b82013-04-17 15:54:58 -07006216
Akash Goel4c2a8892015-03-06 11:07:24 +05306217 gen6_reset_rps_interrupts(dev);
Imre Deak3cc134e2014-11-19 15:30:03 +02006218
Deepak S38807742014-05-23 21:00:15 +05306219 if (IS_CHERRYVIEW(dev)) {
6220 cherryview_enable_rps(dev);
6221 } else if (IS_VALLEYVIEW(dev)) {
Jesse Barnes0a073b82013-04-17 15:54:58 -07006222 valleyview_enable_rps(dev);
Zhe Wang20e49362014-11-04 17:07:05 +00006223 } else if (INTEL_INFO(dev)->gen >= 9) {
Jesse Barnesb6fef0e2015-01-16 18:07:25 +00006224 gen9_enable_rc6(dev);
Zhe Wang20e49362014-11-04 17:07:05 +00006225 gen9_enable_rps(dev);
Akash Goelcc017fb42015-06-29 14:50:21 +05306226 if (IS_SKYLAKE(dev))
6227 __gen6_update_ring_freq(dev);
Ben Widawsky6edee7f2013-11-02 21:07:52 -07006228 } else if (IS_BROADWELL(dev)) {
6229 gen8_enable_rps(dev);
Imre Deakc2bc2fc2014-04-18 16:16:23 +03006230 __gen6_update_ring_freq(dev);
Jesse Barnes0a073b82013-04-17 15:54:58 -07006231 } else {
6232 gen6_enable_rps(dev);
Imre Deakc2bc2fc2014-04-18 16:16:23 +03006233 __gen6_update_ring_freq(dev);
Jesse Barnes0a073b82013-04-17 15:54:58 -07006234 }
Chris Wilsonaed242f2015-03-18 09:48:21 +00006235
6236 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6237 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6238
6239 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6240 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6241
Chris Wilsonc0951f02013-10-10 21:58:50 +01006242 dev_priv->rps.enabled = true;
Imre Deak3cc134e2014-11-19 15:30:03 +02006243
Akash Goel4c2a8892015-03-06 11:07:24 +05306244 gen6_enable_rps_interrupts(dev);
Imre Deak3cc134e2014-11-19 15:30:03 +02006245
Jesse Barnes4fc688c2012-11-02 11:14:01 -07006246 mutex_unlock(&dev_priv->rps.hw_lock);
Imre Deakc6df39b2014-04-14 20:24:29 +03006247
6248 intel_runtime_pm_put(dev_priv);
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006249}
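/*
 * The intel_runtime_pm_put() at the end of the worker above pairs with
 * the intel_runtime_pm_get_noresume() taken in
 * intel_enable_gt_powersave() when the work is scheduled: runtime PM
 * stays blocked from the moment enabling is queued until RPS/RC6 are
 * fully configured, because D3 entry relies on the RC6 context
 * save/restore being set up.
 */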
6250
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006251void intel_enable_gt_powersave(struct drm_device *dev)
6252{
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006253 struct drm_i915_private *dev_priv = dev->dev_private;
6254
Yu Zhangf61018b2015-02-10 19:05:52 +08006255 /* Powersaving is controlled by the host when inside a VM */
6256 if (intel_vgpu_active(dev))
6257 return;
6258
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006259 if (IS_IRONLAKE_M(dev)) {
Imre Deakdc1d0132014-04-14 20:24:28 +03006260 mutex_lock(&dev->struct_mutex);
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006261 ironlake_enable_drps(dev);
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006262 intel_init_emon(dev);
Imre Deakdc1d0132014-04-14 20:24:28 +03006263 mutex_unlock(&dev->struct_mutex);
Deepak S38807742014-05-23 21:00:15 +05306264 } else if (INTEL_INFO(dev)->gen >= 6) {
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006265 /*
6266 * PCU communication is slow and this doesn't need to be
6267 * done at any specific time, so do this out of our fast path
6268 * to make resume and init faster.
Imre Deakc6df39b2014-04-14 20:24:29 +03006269 *
6270 * We depend on the HW RC6 power context save/restore
6271 * mechanism when entering D3 through runtime PM suspend. So
6272 * disable RPM until RPS/RC6 is properly setup. We can only
6273 * get here via the driver load/system resume/runtime resume
6274 * paths, so the _noresume version is enough (and in case of
6275 * runtime resume it's necessary).
Jesse Barnes1a01ab32012-11-02 11:14:00 -07006276 */
Imre Deakc6df39b2014-04-14 20:24:29 +03006277 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6278 round_jiffies_up_relative(HZ)))
6279 intel_runtime_pm_get_noresume(dev_priv);
Daniel Vetter8090c6b2012-06-24 16:42:32 +02006280 }
6281}
6282
Imre Deakc6df39b2014-04-14 20:24:29 +03006283void intel_reset_gt_powersave(struct drm_device *dev)
6284{
6285 struct drm_i915_private *dev_priv = dev->dev_private;
6286
Imre Deakdbea3ce2014-12-15 18:59:28 +02006287 if (INTEL_INFO(dev)->gen < 6)
6288 return;
6289
6290 gen6_suspend_rps(dev);
Imre Deakc6df39b2014-04-14 20:24:29 +03006291 dev_priv->rps.enabled = false;
Imre Deakc6df39b2014-04-14 20:24:29 +03006292}
6293
Daniel Vetter3107bd42012-10-31 22:52:31 +01006294static void ibx_init_clock_gating(struct drm_device *dev)
6295{
6296 struct drm_i915_private *dev_priv = dev->dev_private;
6297
6298 /*
6299 * On Ibex Peak and Cougar Point, we need to disable clock
6300 * gating for the panel power sequencer or it will fail to
6301 * start up when no ports are active.
6302 */
6303 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6304}
6305
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006306static void g4x_disable_trickle_feed(struct drm_device *dev)
6307{
6308 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläb12ce1d2015-05-26 20:27:23 +03006309 enum pipe pipe;
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006310
Damien Lespiau055e3932014-08-18 13:49:10 +01006311 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006312 I915_WRITE(DSPCNTR(pipe),
6313 I915_READ(DSPCNTR(pipe)) |
6314 DISPPLANE_TRICKLE_FEED_DISABLE);
Ville Syrjäläb12ce1d2015-05-26 20:27:23 +03006315
6316 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6317 POSTING_READ(DSPSURF(pipe));
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006318 }
6319}
6320
Ville Syrjälä017636c2013-12-05 15:51:37 +02006321static void ilk_init_lp_watermarks(struct drm_device *dev)
6322{
6323 struct drm_i915_private *dev_priv = dev->dev_private;
6324
6325 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6326 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6327 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6328
6329 /*
6330 * Don't touch WM1S_LP_EN here.
6331 * Doing so could cause underruns.
6332 */
6333}
6334
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006335static void ironlake_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006336{
6337 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau231e54f2012-10-19 17:55:41 +01006338 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006339
Damien Lespiauf1e8fa52013-06-07 17:41:09 +01006340 /*
6341 * Required for FBC
6342 * WaFbcDisableDpfcClockGating:ilk
6343 */
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01006344 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6345 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6346 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006347
6348 I915_WRITE(PCH_3DCGDIS0,
6349 MARIUNIT_CLOCK_GATE_DISABLE |
6350 SVSMUNIT_CLOCK_GATE_DISABLE);
6351 I915_WRITE(PCH_3DCGDIS1,
6352 VFMUNIT_CLOCK_GATE_DISABLE);
6353
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006354 /*
6355 * According to the spec the following bits should be set in
6356 * order to enable memory self-refresh
6357 * The bit 22/21 of 0x42004
6358 * The bit 5 of 0x42020
6359 * The bit 15 of 0x45000
6360 */
6361 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6362 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6363 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01006364 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006365 I915_WRITE(DISP_ARB_CTL,
6366 (I915_READ(DISP_ARB_CTL) |
6367 DISP_FBC_WM_DIS));
Ville Syrjälä017636c2013-12-05 15:51:37 +02006368
6369 ilk_init_lp_watermarks(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006370
6371 /*
6372 * Based on the document from hardware guys the following bits
6373 * should be set unconditionally in order to enable FBC.
6374 * The bit 22 of 0x42000
6375 * The bit 22 of 0x42004
6376 * The bit 7,8,9 of 0x42020.
6377 */
6378 if (IS_IRONLAKE_M(dev)) {
Damien Lespiau4bb35332013-06-14 15:23:24 +01006379 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006380 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6381 I915_READ(ILK_DISPLAY_CHICKEN1) |
6382 ILK_FBCQ_DIS);
6383 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6384 I915_READ(ILK_DISPLAY_CHICKEN2) |
6385 ILK_DPARB_GATE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006386 }
6387
Damien Lespiau4d47e4f2012-10-19 17:55:42 +01006388 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6389
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006390 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6391 I915_READ(ILK_DISPLAY_CHICKEN2) |
6392 ILK_ELPIN_409_SELECT);
6393 I915_WRITE(_3D_CHICKEN2,
6394 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6395 _3D_CHICKEN2_WM_READ_PIPELINED);
Daniel Vetter4358a372012-10-18 11:49:51 +02006396
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006397 /* WaDisableRenderCachePipelinedFlush:ilk */
Daniel Vetter4358a372012-10-18 11:49:51 +02006398 I915_WRITE(CACHE_MODE_0,
6399 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
Daniel Vetter3107bd42012-10-31 22:52:31 +01006400
Akash Goel4e046322014-04-04 17:14:38 +05306401 /* WaDisable_RenderCache_OperationalFlush:ilk */
6402 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6403
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006404 g4x_disable_trickle_feed(dev);
Ville Syrjäläbdad2b22013-06-07 10:47:03 +03006405
Daniel Vetter3107bd42012-10-31 22:52:31 +01006406 ibx_init_clock_gating(dev);
6407}
6408
6409static void cpt_init_clock_gating(struct drm_device *dev)
6410{
6411 struct drm_i915_private *dev_priv = dev->dev_private;
6412 int pipe;
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03006413 uint32_t val;
Daniel Vetter3107bd42012-10-31 22:52:31 +01006414
6415 /*
6416 * On Ibex Peak and Cougar Point, we need to disable clock
6417 * gating for the panel power sequencer or it will fail to
6418 * start up when no ports are active.
6419 */
Jesse Barnescd664072013-10-02 10:34:19 -07006420 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6421 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6422 PCH_CPUNIT_CLOCK_GATE_DISABLE);
Daniel Vetter3107bd42012-10-31 22:52:31 +01006423 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6424 DPLS_EDP_PPS_FIX_DIS);
Takashi Iwai335c07b2012-12-11 11:46:29 +01006425 /* The below fixes the weird display corruption, a few pixels shifted
6426 * downward, on (only) LVDS of some HP laptops with IVY.
6427 */
Damien Lespiau055e3932014-08-18 13:49:10 +01006428 for_each_pipe(dev_priv, pipe) {
Paulo Zanonidc4bd2d2013-04-08 15:48:08 -03006429 val = I915_READ(TRANS_CHICKEN2(pipe));
6430 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6431 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03006432 if (dev_priv->vbt.fdi_rx_polarity_inverted)
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03006433 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
Paulo Zanonidc4bd2d2013-04-08 15:48:08 -03006434 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6435 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6436 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
Paulo Zanoni3f704fa2013-04-08 15:48:07 -03006437 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6438 }
Daniel Vetter3107bd42012-10-31 22:52:31 +01006439 /* WADP0ClockGatingDisable */
Damien Lespiau055e3932014-08-18 13:49:10 +01006440 for_each_pipe(dev_priv, pipe) {
Daniel Vetter3107bd42012-10-31 22:52:31 +01006441 I915_WRITE(TRANS_CHICKEN1(pipe),
6442 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6443 }
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006444}
6445
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01006446static void gen6_check_mch_setup(struct drm_device *dev)
6447{
6448 struct drm_i915_private *dev_priv = dev->dev_private;
6449 uint32_t tmp;
6450
6451 tmp = I915_READ(MCH_SSKPD);
Daniel Vetterdf662a22014-08-04 11:17:25 +02006452 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6453		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
6454 tmp);
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01006455}
6456
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006457static void gen6_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006458{
6459 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau231e54f2012-10-19 17:55:41 +01006460 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006461
Damien Lespiau231e54f2012-10-19 17:55:41 +01006462 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006463
6464 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6465 I915_READ(ILK_DISPLAY_CHICKEN2) |
6466 ILK_ELPIN_409_SELECT);
6467
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006468 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
Daniel Vetter42839082012-12-14 23:38:28 +01006469 I915_WRITE(_3D_CHICKEN,
6470 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6471
Akash Goel4e046322014-04-04 17:14:38 +05306472 /* WaDisable_RenderCache_OperationalFlush:snb */
6473 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6474
Ville Syrjälä8d85d272014-02-04 21:59:15 +02006475 /*
6476	 * BSpec recommends 8x4 when MSAA is used,
6477 * however in practice 16x4 seems fastest.
Ville Syrjäläc5c98a52014-02-05 12:43:47 +02006478 *
6479 * Note that PS/WM thread counts depend on the WIZ hashing
6480 * disable bit, which we don't touch here, but it's good
6481 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
Ville Syrjälä8d85d272014-02-04 21:59:15 +02006482 */
6483 I915_WRITE(GEN6_GT_MODE,
Damien Lespiau98533252014-12-08 17:33:51 +00006484 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
Ville Syrjälä8d85d272014-02-04 21:59:15 +02006485
Ville Syrjälä017636c2013-12-05 15:51:37 +02006486 ilk_init_lp_watermarks(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006487
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006488 I915_WRITE(CACHE_MODE_0,
Daniel Vetter50743292012-04-26 22:02:54 +02006489 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006490
6491 I915_WRITE(GEN6_UCGCTL1,
6492 I915_READ(GEN6_UCGCTL1) |
6493 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6494 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6495
6496 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6497 * gating disable must be set. Failure to set it results in
6498 * flickering pixels due to Z write ordering failures after
6499 * some amount of runtime in the Mesa "fire" demo, and Unigine
6500 * Sanctuary and Tropics, and apparently anything else with
6501 * alpha test or pixel discard.
6502 *
6503 * According to the spec, bit 11 (RCCUNIT) must also be set,
6504 * but we didn't debug actual testcases to find it out.
Jesse Barnes0f846f82012-06-14 11:04:47 -07006505 *
Ville Syrjäläef593182014-01-22 21:32:47 +02006506 * WaDisableRCCUnitClockGating:snb
6507 * WaDisableRCPBUnitClockGating:snb
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006508 */
6509 I915_WRITE(GEN6_UCGCTL2,
6510 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6511 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6512
Ville Syrjälä5eb146d2014-02-04 21:59:16 +02006513 /* WaStripsFansDisableFastClipPerformanceFix:snb */
Ville Syrjälä743b57d2014-02-04 21:59:17 +02006514 I915_WRITE(_3D_CHICKEN3,
6515 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006516
6517 /*
Ville Syrjäläe927ecd2014-02-04 21:59:18 +02006518 * BSpec says:
6519 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6520 * 3DSTATE_SF number of SF output attributes is more than 16."
6521 */
6522 I915_WRITE(_3D_CHICKEN3,
6523 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6524
6525 /*
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006526 * According to the spec the following bits should be
6527 * set in order to enable memory self-refresh and fbc:
6528 * Bits 21 and 22 of 0x42000
6529 * Bits 21 and 22 of 0x42004
6530 * Bits 5 and 7 of 0x42020
6531 * Bit 14 of 0x70180
6532 * Bit 14 of 0x71180
Damien Lespiau4bb35332013-06-14 15:23:24 +01006533 *
6534 * WaFbcAsynchFlipDisableFbcQueue:snb
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006535 */
6536 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6537 I915_READ(ILK_DISPLAY_CHICKEN1) |
6538 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6539 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6540 I915_READ(ILK_DISPLAY_CHICKEN2) |
6541 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
Damien Lespiau231e54f2012-10-19 17:55:41 +01006542 I915_WRITE(ILK_DSPCLK_GATE_D,
6543 I915_READ(ILK_DSPCLK_GATE_D) |
6544 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6545 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006546
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006547 g4x_disable_trickle_feed(dev);
Ben Widawskyf8f2ac92012-10-03 19:34:24 -07006548
Daniel Vetter3107bd42012-10-31 22:52:31 +01006549 cpt_init_clock_gating(dev);
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01006550
6551 gen6_check_mch_setup(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006552}
6553
6554static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6555{
6556 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
6557
Ville Syrjälä3aad9052014-01-22 21:32:59 +02006558 /*
Ville Syrjälä46680e02014-01-22 21:33:01 +02006559 * WaVSThreadDispatchOverride:ivb,vlv
Ville Syrjälä3aad9052014-01-22 21:32:59 +02006560 *
6561 * This actually overrides the dispatch
6562 * mode for all thread types.
6563 */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006564 reg &= ~GEN7_FF_SCHED_MASK;
6565 reg |= GEN7_FF_TS_SCHED_HW;
6566 reg |= GEN7_FF_VS_SCHED_HW;
6567 reg |= GEN7_FF_DS_SCHED_HW;
6568
6569 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6570}
6571
Paulo Zanoni17a303e2012-11-20 15:12:07 -02006572static void lpt_init_clock_gating(struct drm_device *dev)
6573{
6574 struct drm_i915_private *dev_priv = dev->dev_private;
6575
6576 /*
6577 * TODO: this bit should only be enabled when really needed, and
6578 * then disabled again when it is not, in order to save power.
6579 */
6580 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
6581 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6582 I915_READ(SOUTH_DSPCLK_GATE_D) |
6583 PCH_LP_PARTITION_LEVEL_DISABLE);
Paulo Zanoni0a790cd2013-04-17 18:15:49 -03006584
6585 /* WADPOClockGatingDisable:hsw */
6586 I915_WRITE(_TRANSA_CHICKEN1,
6587 I915_READ(_TRANSA_CHICKEN1) |
6588 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
Paulo Zanoni17a303e2012-11-20 15:12:07 -02006589}
6590
Imre Deak7d708ee2013-04-17 14:04:50 +03006591static void lpt_suspend_hw(struct drm_device *dev)
6592{
6593 struct drm_i915_private *dev_priv = dev->dev_private;
6594
6595 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6596 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6597
6598 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6599 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6600 }
6601}
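/*
 * This undoes the PCH_LP_PARTITION_LEVEL_DISABLE setting applied in
 * lpt_init_clock_gating() above, so the LP PCH can power down its
 * partitions across suspend; see intel_suspend_hw() below for the
 * caller.
 */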
6602
Paulo Zanoni47c2bd92014-08-21 17:09:37 -03006603static void broadwell_init_clock_gating(struct drm_device *dev)
Ben Widawsky1020a5c2013-11-02 21:07:06 -07006604{
6605 struct drm_i915_private *dev_priv = dev->dev_private;
Damien Lespiau07d27e22014-03-03 17:31:46 +00006606 enum pipe pipe;
Ville Syrjälä4d487cf2015-05-19 20:32:56 +03006607 uint32_t misccpctl;
Ben Widawsky1020a5c2013-11-02 21:07:06 -07006608
Ville Syrjälä7ad0dba2015-05-19 20:32:55 +03006609 ilk_init_lp_watermarks(dev);
Ben Widawsky50ed5fb2013-11-02 21:07:40 -07006610
Ben Widawskyab57fff2013-12-12 15:28:04 -08006611 /* WaSwitchSolVfFArbitrationPriority:bdw */
Ben Widawsky50ed5fb2013-11-02 21:07:40 -07006612 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
Ben Widawskyfe4ab3c2013-11-02 21:07:54 -07006613
Ben Widawskyab57fff2013-12-12 15:28:04 -08006614 /* WaPsrDPAMaskVBlankInSRD:bdw */
Ben Widawskyfe4ab3c2013-11-02 21:07:54 -07006615 I915_WRITE(CHICKEN_PAR1_1,
6616 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6617
Ben Widawskyab57fff2013-12-12 15:28:04 -08006618 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
Damien Lespiau055e3932014-08-18 13:49:10 +01006619 for_each_pipe(dev_priv, pipe) {
Damien Lespiau07d27e22014-03-03 17:31:46 +00006620 I915_WRITE(CHICKEN_PIPESL_1(pipe),
Ville Syrjäläc7c65622014-03-05 13:05:45 +02006621 I915_READ(CHICKEN_PIPESL_1(pipe)) |
Ville Syrjälä8f670bb2014-03-05 13:05:47 +02006622 BDW_DPRS_MASK_VBLANK_SRD);
Ben Widawskyfe4ab3c2013-11-02 21:07:54 -07006623 }
Ben Widawsky63801f22013-12-12 17:26:03 -08006624
Ben Widawskyab57fff2013-12-12 15:28:04 -08006625 /* WaVSRefCountFullforceMissDisable:bdw */
6626 /* WaDSRefCountFullforceMissDisable:bdw */
6627 I915_WRITE(GEN7_FF_THREAD_MODE,
6628 I915_READ(GEN7_FF_THREAD_MODE) &
6629 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
Ville Syrjälä36075a42014-02-04 21:59:21 +02006630
Ville Syrjälä295e8bb2014-02-27 21:59:01 +02006631 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6632 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
Ville Syrjälä4f1ca9e2014-02-27 21:59:02 +02006633
6634 /* WaDisableSDEUnitClockGating:bdw */
6635 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6636 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
Damien Lespiau5d708682014-03-26 18:41:51 +00006637
Ville Syrjälä4d487cf2015-05-19 20:32:56 +03006638 /*
6639 * WaProgramL3SqcReg1Default:bdw
6640 * WaTempDisableDOPClkGating:bdw
6641 */
6642 misccpctl = I915_READ(GEN7_MISCCPCTL);
6643 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6644 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6645 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6646
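	/*
	 * The MISCCPCTL sequence above temporarily disables DOP clock
	 * gating while the L3SQCREG1 credit defaults are written, then
	 * restores the saved value, matching the
	 * WaTempDisableDOPClkGating name; presumably the write would not
	 * latch reliably with DOP gating active.
	 */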
Ville Syrjälä6d50b062015-05-19 20:32:57 +03006647 /*
6648 * WaGttCachingOffByDefault:bdw
6649 * GTT cache may not work with big pages, so if those
6650 * are ever enabled GTT cache may need to be disabled.
6651 */
6652 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6653
Paulo Zanoni89d6b2b2014-08-21 17:09:36 -03006654 lpt_init_clock_gating(dev);
Ben Widawsky1020a5c2013-11-02 21:07:06 -07006655}
6656
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006657static void haswell_init_clock_gating(struct drm_device *dev)
6658{
6659 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006660
Ville Syrjälä017636c2013-12-05 15:51:37 +02006661 ilk_init_lp_watermarks(dev);
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006662
Francisco Jerezf3fc4882013-10-02 15:53:16 -07006663 /* L3 caching of data atomics doesn't work -- disable it. */
6664 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6665 I915_WRITE(HSW_ROW_CHICKEN3,
6666 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6667
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006668 /* This is required by WaCatErrorRejectionIssue:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006669 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6670 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6671 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6672
Ville Syrjäläe36ea7f2014-01-22 21:33:00 +02006673 /* WaVSRefCountFullforceMissDisable:hsw */
6674 I915_WRITE(GEN7_FF_THREAD_MODE,
6675 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006676
Akash Goel4e046322014-04-04 17:14:38 +05306677 /* WaDisable_RenderCache_OperationalFlush:hsw */
6678 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6679
Chia-I Wufe27c602014-01-28 13:29:33 +08006680 /* enable HiZ Raw Stall Optimization */
6681 I915_WRITE(CACHE_MODE_0_GEN7,
6682 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6683
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006684 /* WaDisable4x2SubspanOptimization:hsw */
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006685 I915_WRITE(CACHE_MODE_1,
6686 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Eugeni Dodonov1544d9d2012-07-02 11:51:10 -03006687
Ville Syrjäläa12c4962014-02-04 21:59:20 +02006688 /*
6689 * BSpec recommends 8x4 when MSAA is used,
6690 * however in practice 16x4 seems fastest.
Ville Syrjäläc5c98a52014-02-05 12:43:47 +02006691 *
6692 * Note that PS/WM thread counts depend on the WIZ hashing
6693 * disable bit, which we don't touch here, but it's good
6694 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
Ville Syrjäläa12c4962014-02-04 21:59:20 +02006695 */
6696 I915_WRITE(GEN7_GT_MODE,
Damien Lespiau98533252014-12-08 17:33:51 +00006697 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
Ville Syrjäläa12c4962014-02-04 21:59:20 +02006698
Kenneth Graunke94411592014-12-31 16:23:00 -08006699 /* WaSampleCChickenBitEnable:hsw */
6700 I915_WRITE(HALF_SLICE_CHICKEN3,
6701 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6702
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006703 /* WaSwitchSolVfFArbitrationPriority:hsw */
Ben Widawskye3dff582013-03-20 14:49:14 -07006704 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6705
Paulo Zanoni90a88642013-05-03 17:23:45 -03006706 /* WaRsPkgCStateDisplayPMReq:hsw */
6707 I915_WRITE(CHICKEN_PAR1_1,
6708 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
Eugeni Dodonov1544d9d2012-07-02 11:51:10 -03006709
Paulo Zanoni17a303e2012-11-20 15:12:07 -02006710 lpt_init_clock_gating(dev);
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03006711}
6712
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006713static void ivybridge_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006714{
6715 struct drm_i915_private *dev_priv = dev->dev_private;
Ben Widawsky20848222012-05-04 18:58:59 -07006716 uint32_t snpcr;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006717
Ville Syrjälä017636c2013-12-05 15:51:37 +02006718 ilk_init_lp_watermarks(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006719
Damien Lespiau231e54f2012-10-19 17:55:41 +01006720 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006721
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006722 /* WaDisableEarlyCull:ivb */
Jesse Barnes87f80202012-10-02 17:43:41 -05006723 I915_WRITE(_3D_CHICKEN3,
6724 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6725
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006726 /* WaDisableBackToBackFlipFix:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006727 I915_WRITE(IVB_CHICKEN3,
6728 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6729 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6730
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006731 /* WaDisablePSDDualDispatchEnable:ivb */
Jesse Barnes12f33822012-10-25 12:15:45 -07006732 if (IS_IVB_GT1(dev))
6733 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
6734 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
Jesse Barnes12f33822012-10-25 12:15:45 -07006735
Akash Goel4e046322014-04-04 17:14:38 +05306736 /* WaDisable_RenderCache_OperationalFlush:ivb */
6737 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6738
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006739 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006740 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
6741 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
6742
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006743 /* WaApplyL3ControlAndL3ChickenMode:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006744 I915_WRITE(GEN7_L3CNTLREG1,
6745 GEN7_WA_FOR_GEN7_L3_CONTROL);
6746 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
Jesse Barnes8ab43972012-10-25 12:15:42 -07006747 GEN7_WA_L3_CHICKEN_MODE);
6748 if (IS_IVB_GT1(dev))
6749 I915_WRITE(GEN7_ROW_CHICKEN2,
6750 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
Ville Syrjälä412236c2014-01-22 21:32:44 +02006751 else {
6752 /* must write both registers */
6753 I915_WRITE(GEN7_ROW_CHICKEN2,
6754 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
Jesse Barnes8ab43972012-10-25 12:15:42 -07006755 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
6756 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
Ville Syrjälä412236c2014-01-22 21:32:44 +02006757 }
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006758
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006759 /* WaForceL3Serialization:ivb */
Jesse Barnes61939d92012-10-02 17:43:38 -05006760 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6761 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6762
Ville Syrjälä1b80a19a2014-01-22 21:32:53 +02006763 /*
Jesse Barnes0f846f82012-06-14 11:04:47 -07006764 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006765 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
Jesse Barnes0f846f82012-06-14 11:04:47 -07006766 */
6767 I915_WRITE(GEN6_UCGCTL2,
Ville Syrjälä28acf3b2014-01-22 21:32:48 +02006768 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
Jesse Barnes0f846f82012-06-14 11:04:47 -07006769
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006770 /* This is required by WaCatErrorRejectionIssue:ivb */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006771 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6772 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6773 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6774
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006775 g4x_disable_trickle_feed(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006776
6777 gen7_setup_fixed_func_scheduler(dev_priv);
Daniel Vetter97e19302012-04-24 16:00:21 +02006778
Chris Wilson22721342014-03-04 09:41:43 +00006779 if (0) { /* causes HiZ corruption on ivb:gt1 */
6780 /* enable HiZ Raw Stall Optimization */
6781 I915_WRITE(CACHE_MODE_0_GEN7,
6782 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6783 }
Chia-I Wu116f2b62014-01-28 13:29:34 +08006784
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006785 /* WaDisable4x2SubspanOptimization:ivb */
Daniel Vetter97e19302012-04-24 16:00:21 +02006786 I915_WRITE(CACHE_MODE_1,
6787 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Ben Widawsky20848222012-05-04 18:58:59 -07006788
Ville Syrjäläa607c1a2014-02-04 21:59:19 +02006789 /*
6790 * BSpec recommends 8x4 when MSAA is used,
6791 * however in practice 16x4 seems fastest.
Ville Syrjäläc5c98a52014-02-05 12:43:47 +02006792 *
6793 * Note that PS/WM thread counts depend on the WIZ hashing
6794 * disable bit, which we don't touch here, but it's good
6795 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
Ville Syrjäläa607c1a2014-02-04 21:59:19 +02006796 */
6797 I915_WRITE(GEN7_GT_MODE,
Damien Lespiau98533252014-12-08 17:33:51 +00006798 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
Ville Syrjäläa607c1a2014-02-04 21:59:19 +02006799
Ben Widawsky20848222012-05-04 18:58:59 -07006800 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
6801 snpcr &= ~GEN6_MBC_SNPCR_MASK;
6802 snpcr |= GEN6_MBC_SNPCR_MED;
6803 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
Daniel Vetter3107bd42012-10-31 22:52:31 +01006804
Ben Widawskyab5c6082013-04-05 13:12:41 -07006805 if (!HAS_PCH_NOP(dev))
6806 cpt_init_clock_gating(dev);
Daniel Vetter1d7aaa02013-02-09 21:03:42 +01006807
6808 gen6_check_mch_setup(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006809}
6810
Ville Syrjäläc6beb132015-03-05 21:19:48 +02006811static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
6812{
6813 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
6814
6815 /*
6816 * Disable trickle feed and enable pnd deadline calculation
6817 */
6818 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
6819 I915_WRITE(CBR1_VLV, 0);
6820}
6821
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006822static void valleyview_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006823{
6824 struct drm_i915_private *dev_priv = dev->dev_private;
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006825
Ville Syrjäläc6beb132015-03-05 21:19:48 +02006826 vlv_init_display_clock_gating(dev_priv);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006827
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006828 /* WaDisableEarlyCull:vlv */
Jesse Barnes87f80202012-10-02 17:43:41 -05006829 I915_WRITE(_3D_CHICKEN3,
6830 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
6831
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006832 /* WaDisableBackToBackFlipFix:vlv */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006833 I915_WRITE(IVB_CHICKEN3,
6834 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
6835 CHICKEN3_DGMG_DONE_FIX_DISABLE);
6836
Ville Syrjäläfad7d362014-01-22 21:32:39 +02006837 /* WaPsdDispatchEnable:vlv */
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006838 /* WaDisablePSDDualDispatchEnable:vlv */
Jesse Barnes12f33822012-10-25 12:15:45 -07006839 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
Jesse Barnesd3bc0302013-03-08 10:45:51 -08006840 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
6841 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
Jesse Barnes12f33822012-10-25 12:15:45 -07006842
Akash Goel4e046322014-04-04 17:14:38 +05306843 /* WaDisable_RenderCache_OperationalFlush:vlv */
6844 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6845
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006846 /* WaForceL3Serialization:vlv */
Jesse Barnes61939d92012-10-02 17:43:38 -05006847 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
6848 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
6849
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006850 /* WaDisableDopClockGating:vlv */
Jesse Barnes8ab43972012-10-25 12:15:42 -07006851 I915_WRITE(GEN7_ROW_CHICKEN2,
6852 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
6853
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006854 /* This is required by WaCatErrorRejectionIssue:vlv */
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006855 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6856 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6857 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6858
Ville Syrjälä46680e02014-01-22 21:33:01 +02006859 gen7_setup_fixed_func_scheduler(dev_priv);
6860
Ville Syrjälä3c0edae2014-01-22 21:32:56 +02006861 /*
Jesse Barnes0f846f82012-06-14 11:04:47 -07006862 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006863 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
Jesse Barnes0f846f82012-06-14 11:04:47 -07006864 */
6865 I915_WRITE(GEN6_UCGCTL2,
Ville Syrjälä3c0edae2014-01-22 21:32:56 +02006866 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
Jesse Barnes0f846f82012-06-14 11:04:47 -07006867
Akash Goelc98f5062014-03-24 23:00:07 +05306868 /* WaDisableL3Bank2xClockGate:vlv
6869 * Disabling L3 clock gating: MMIO 940c[25] = 1
6870 * Set bit 25 to disable L3_BANK_2x_CLK_GATING */
6871 I915_WRITE(GEN7_UCGCTL4,
6872 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
Jesse Barnese3f33d42012-06-14 11:04:50 -07006873
Ville Syrjäläafd58e72014-01-22 21:33:03 +02006874 /*
6875 * BSpec says this must be set, even though
6876 * WaDisable4x2SubspanOptimization isn't listed for VLV.
6877 */
Daniel Vetter6b26c862012-04-24 14:04:12 +02006878 I915_WRITE(CACHE_MODE_1,
6879 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
Jesse Barnes79831172012-06-20 10:53:12 -07006880
6881 /*
Ville Syrjäläda2518f2015-01-21 19:38:01 +02006882 * BSpec recommends 8x4 when MSAA is used,
6883 * however in practice 16x4 seems fastest.
6884 *
6885 * Note that PS/WM thread counts depend on the WIZ hashing
6886 * disable bit, which we don't touch here, but it's good
6887 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6888 */
6889 I915_WRITE(GEN7_GT_MODE,
6890 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6891
6892 /*
Ville Syrjälä031994e2014-01-22 21:32:46 +02006893 * WaIncreaseL3CreditsForVLVB0:vlv
6894 * This is the hardware default actually.
6895 */
6896 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
6897
6898 /*
Damien Lespiauecdb4eb72013-05-03 18:48:10 +01006899 * WaDisableVLVClockGating_VBIIssue:vlv
Jesse Barnes2d809572012-10-25 12:15:44 -07006900 * Disable clock gating on the GCFG unit to prevent a delay
6901 * in the reporting of vblank events.
6902 */
Ville Syrjälä7a0d1ee2014-01-22 21:33:04 +02006903 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006904}
6905
Ville Syrjäläa4565da2014-04-09 13:28:10 +03006906static void cherryview_init_clock_gating(struct drm_device *dev)
6907{
6908 struct drm_i915_private *dev_priv = dev->dev_private;
6909
Ville Syrjäläc6beb132015-03-05 21:19:48 +02006910 vlv_init_display_clock_gating(dev_priv);
Ville Syrjälädd811e72014-04-09 13:28:33 +03006911
Ville Syrjälä232ce332014-04-09 13:28:35 +03006912 /* WaVSRefCountFullforceMissDisable:chv */
6913 /* WaDSRefCountFullforceMissDisable:chv */
6914 I915_WRITE(GEN7_FF_THREAD_MODE,
6915 I915_READ(GEN7_FF_THREAD_MODE) &
6916 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
Ville Syrjäläacea6f92014-04-09 13:28:36 +03006917
6918 /* WaDisableSemaphoreAndSyncFlipWait:chv */
6919 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6920 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
Ville Syrjälä08466972014-04-09 13:28:37 +03006921
6922 /* WaDisableCSUnitClockGating:chv */
6923 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
6924 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
Ville Syrjäläc6317802014-04-09 13:28:38 +03006925
6926 /* WaDisableSDEUnitClockGating:chv */
6927 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6928 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
Ville Syrjälä6d50b062015-05-19 20:32:57 +03006929
6930 /*
6931 * GTT cache may not work with big pages, so if those
6932 * are ever enabled GTT cache may need to be disabled.
6933 */
6934 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
Ville Syrjäläa4565da2014-04-09 13:28:10 +03006935}
6936
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006937static void g4x_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006938{
6939 struct drm_i915_private *dev_priv = dev->dev_private;
6940 uint32_t dspclk_gate;
6941
6942 I915_WRITE(RENCLK_GATE_D1, 0);
6943 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
6944 GS_UNIT_CLOCK_GATE_DISABLE |
6945 CL_UNIT_CLOCK_GATE_DISABLE);
6946 I915_WRITE(RAMCLK_GATE_D, 0);
6947 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
6948 OVRUNIT_CLOCK_GATE_DISABLE |
6949 OVCUNIT_CLOCK_GATE_DISABLE;
6950 if (IS_GM45(dev))
6951 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
6952 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
Daniel Vetter4358a372012-10-18 11:49:51 +02006953
6954 /* WaDisableRenderCachePipelinedFlush */
6955 I915_WRITE(CACHE_MODE_0,
6956 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
Ville Syrjäläde1aa622013-06-07 10:47:01 +03006957
Akash Goel4e046322014-04-04 17:14:38 +05306958 /* WaDisable_RenderCache_OperationalFlush:g4x */
6959 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6960
Ville Syrjälä0e088b82013-06-07 10:47:04 +03006961 g4x_disable_trickle_feed(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006962}
6963
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006964static void crestline_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006965{
6966 struct drm_i915_private *dev_priv = dev->dev_private;
6967
6968 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
6969 I915_WRITE(RENCLK_GATE_D2, 0);
6970 I915_WRITE(DSPCLK_GATE_D, 0);
6971 I915_WRITE(RAMCLK_GATE_D, 0);
6972 I915_WRITE16(DEUC, 0);
Ville Syrjälä20f94962013-06-07 10:47:02 +03006973 I915_WRITE(MI_ARB_STATE,
6974 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
Akash Goel4e046322014-04-04 17:14:38 +05306975
6976 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6977 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006978}
6979
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006980static void broadwater_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006981{
6982 struct drm_i915_private *dev_priv = dev->dev_private;
6983
6984 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
6985 I965_RCC_CLOCK_GATE_DISABLE |
6986 I965_RCPB_CLOCK_GATE_DISABLE |
6987 I965_ISC_CLOCK_GATE_DISABLE |
6988 I965_FBC_CLOCK_GATE_DISABLE);
6989 I915_WRITE(RENCLK_GATE_D2, 0);
Ville Syrjälä20f94962013-06-07 10:47:02 +03006990 I915_WRITE(MI_ARB_STATE,
6991 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
Akash Goel4e046322014-04-04 17:14:38 +05306992
6993 /* WaDisable_RenderCache_OperationalFlush:gen4 */
6994 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006995}
6996
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03006997static void gen3_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03006998{
6999 struct drm_i915_private *dev_priv = dev->dev_private;
7000 u32 dstate = I915_READ(D_STATE);
7001
7002 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7003 DSTATE_DOT_CLOCK_GATING;
7004 I915_WRITE(D_STATE, dstate);
Chris Wilson13a86b82012-04-24 14:51:43 +01007005
7006 if (IS_PINEVIEW(dev))
7007 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
Daniel Vetter974a3b02012-09-09 11:54:16 +02007008
7009 /* IIR "flip pending" means done if this bit is set */
7010 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
Ville Syrjälä12fabbcb92014-02-25 15:13:38 +02007011
7012 /* interrupts should cause a wake up from C3 */
Ville Syrjälä32992542014-02-25 15:13:39 +02007013 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
Ville Syrjälädbb42742014-02-25 15:13:41 +02007014
7015 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7016 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Ville Syrjälä10383922014-08-15 01:21:54 +03007017
7018 I915_WRITE(MI_ARB_STATE,
7019 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007020}
7021
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007022static void i85x_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007023{
7024 struct drm_i915_private *dev_priv = dev->dev_private;
7025
7026 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
Ville Syrjälä54e472a2014-02-25 15:13:40 +02007027
7028 /* interrupts should cause a wake up from C3 */
7029 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7030 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
Ville Syrjälä10383922014-08-15 01:21:54 +03007031
7032 I915_WRITE(MEM_MODE,
7033 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007034}
7035
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007036static void i830_init_clock_gating(struct drm_device *dev)
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007037{
7038 struct drm_i915_private *dev_priv = dev->dev_private;
7039
7040 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
Ville Syrjälä10383922014-08-15 01:21:54 +03007041
7042 I915_WRITE(MEM_MODE,
7043 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7044 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007045}
7046
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007047void intel_init_clock_gating(struct drm_device *dev)
7048{
7049 struct drm_i915_private *dev_priv = dev->dev_private;
7050
Damien Lespiauc57e3552015-02-09 19:33:05 +00007051 if (dev_priv->display.init_clock_gating)
7052 dev_priv->display.init_clock_gating(dev);
Eugeni Dodonov6f1d69b2012-04-18 15:29:25 -03007053}
7054
Imre Deak7d708ee2013-04-17 14:04:50 +03007055void intel_suspend_hw(struct drm_device *dev)
7056{
7057 if (HAS_PCH_LPT(dev))
7058 lpt_suspend_hw(dev);
7059}
7060
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007061/* Set up chip specific power management-related functions */
7062void intel_init_pm(struct drm_device *dev)
7063{
7064 struct drm_i915_private *dev_priv = dev->dev_private;
7065
Rodrigo Vivi7ff0ebc2014-12-08 14:09:10 -02007066 intel_fbc_init(dev_priv);
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007067
Daniel Vetterc921aba2012-04-26 23:28:17 +02007068 /* For cxsr */
7069 if (IS_PINEVIEW(dev))
7070 i915_pineview_get_mem_freq(dev);
7071 else if (IS_GEN5(dev))
7072 i915_ironlake_get_mem_freq(dev);
7073
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007074 /* For FIFO watermark updates */
Damien Lespiauf5ed50c2014-11-13 17:51:52 +00007075 if (INTEL_INFO(dev)->gen >= 9) {
Pradeep Bhat2af30a52014-11-04 17:06:38 +00007076 skl_setup_wm_latency(dev);
7077
Imre Deaka82abe42015-03-27 14:00:04 +02007078 if (IS_BROXTON(dev))
7079 dev_priv->display.init_clock_gating =
7080 bxt_init_clock_gating;
7081 else if (IS_SKYLAKE(dev))
7082 dev_priv->display.init_clock_gating =
7083 skl_init_clock_gating;
Pradeep Bhat2d41c0b2014-11-04 17:06:42 +00007084 dev_priv->display.update_wm = skl_update_wm;
7085 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
Damien Lespiauc83155a2014-03-28 00:18:35 +05307086 } else if (HAS_PCH_SPLIT(dev)) {
Damien Lespiaufa50ad62014-03-17 18:01:16 +00007087 ilk_setup_wm_latency(dev);
Ville Syrjälä53615a52013-08-01 16:18:50 +03007088
Ville Syrjäläbd602542014-01-07 16:14:10 +02007089 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7090 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7091 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7092 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7093 dev_priv->display.update_wm = ilk_update_wm;
7094 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
7095 } else {
7096 DRM_DEBUG_KMS("Failed to read display plane latency. "
7097 "Disable CxSR\n");
7098 }
7099
7100 if (IS_GEN5(dev))
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007101 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
Ville Syrjäläbd602542014-01-07 16:14:10 +02007102 else if (IS_GEN6(dev))
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007103 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
Ville Syrjäläbd602542014-01-07 16:14:10 +02007104 else if (IS_IVYBRIDGE(dev))
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007105 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
Ville Syrjäläbd602542014-01-07 16:14:10 +02007106 else if (IS_HASWELL(dev))
Eugeni Dodonovcad2a2d2012-07-02 11:51:09 -03007107 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
Ville Syrjäläbd602542014-01-07 16:14:10 +02007108 else if (INTEL_INFO(dev)->gen == 8)
Paulo Zanoni47c2bd92014-08-21 17:09:37 -03007109 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
Ville Syrjäläa4565da2014-04-09 13:28:10 +03007110 } else if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä262cd2e2015-06-24 22:00:04 +03007111 vlv_setup_wm_latency(dev);
7112
7113 dev_priv->display.update_wm = vlv_update_wm;
Ville Syrjäläa4565da2014-04-09 13:28:10 +03007114 dev_priv->display.init_clock_gating =
7115 cherryview_init_clock_gating;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007116 } else if (IS_VALLEYVIEW(dev)) {
Ville Syrjälä26e1fe42015-06-24 22:00:06 +03007117 vlv_setup_wm_latency(dev);
7118
7119 dev_priv->display.update_wm = vlv_update_wm;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007120 dev_priv->display.init_clock_gating =
7121 valleyview_init_clock_gating;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007122 } else if (IS_PINEVIEW(dev)) {
7123 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7124 dev_priv->is_ddr3,
7125 dev_priv->fsb_freq,
7126 dev_priv->mem_freq)) {
7127 DRM_INFO("failed to find known CxSR latency "
7128 "(found ddr%s fsb freq %d, mem freq %d), "
7129 "disabling CxSR\n",
7130 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7131 dev_priv->fsb_freq, dev_priv->mem_freq);
7132 /* Disable CxSR and never update its watermark again */
Imre Deak5209b1f2014-07-01 12:36:17 +03007133 intel_set_memory_cxsr(dev_priv, false);
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007134 dev_priv->display.update_wm = NULL;
7135 } else
7136 dev_priv->display.update_wm = pineview_update_wm;
7137 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7138 } else if (IS_G4X(dev)) {
7139 dev_priv->display.update_wm = g4x_update_wm;
7140 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7141 } else if (IS_GEN4(dev)) {
7142 dev_priv->display.update_wm = i965_update_wm;
7143 if (IS_CRESTLINE(dev))
7144 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7145 else if (IS_BROADWATER(dev))
7146 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7147 } else if (IS_GEN3(dev)) {
7148 dev_priv->display.update_wm = i9xx_update_wm;
7149 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7150 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
Daniel Vetterfeb56b92013-12-14 20:38:30 -02007151 } else if (IS_GEN2(dev)) {
7152 if (INTEL_INFO(dev)->num_pipes == 1) {
7153 dev_priv->display.update_wm = i845_update_wm;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007154 dev_priv->display.get_fifo_size = i845_get_fifo_size;
Daniel Vetterfeb56b92013-12-14 20:38:30 -02007155 } else {
7156 dev_priv->display.update_wm = i9xx_update_wm;
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007157 dev_priv->display.get_fifo_size = i830_get_fifo_size;
Daniel Vetterfeb56b92013-12-14 20:38:30 -02007158 }
7159
7160 if (IS_I85X(dev) || IS_I865G(dev))
7161 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7162 else
7163 dev_priv->display.init_clock_gating = i830_init_clock_gating;
7164 } else {
7165 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
Eugeni Dodonov1fa61102012-04-18 15:29:26 -03007166 }
7167}
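/*
 * Note that intel_init_pm() only selects function pointers; no hardware
 * is touched here. The chosen init_clock_gating hook is applied later
 * through intel_init_clock_gating() above, and the update_wm hooks are
 * invoked from the display/watermark paths.
 */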
7168
Tom O'Rourke151a49d2014-11-13 18:50:10 -08007169int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
Ben Widawsky42c05262012-09-26 10:34:00 -07007170{
Jesse Barnes4fc688c2012-11-02 11:14:01 -07007171 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Ben Widawsky42c05262012-09-26 10:34:00 -07007172
7173 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7174 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7175 return -EAGAIN;
7176 }
7177
7178 I915_WRITE(GEN6_PCODE_DATA, *val);
Damien Lespiaudddab342014-11-13 17:51:50 +00007179 I915_WRITE(GEN6_PCODE_DATA1, 0);
Ben Widawsky42c05262012-09-26 10:34:00 -07007180 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7181
7182 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7183 500)) {
7184 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7185 return -ETIMEDOUT;
7186 }
7187
7188 *val = I915_READ(GEN6_PCODE_DATA);
7189 I915_WRITE(GEN6_PCODE_DATA, 0);
7190
7191 return 0;
7192}
7193
Tom O'Rourke151a49d2014-11-13 18:50:10 -08007194int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
Ben Widawsky42c05262012-09-26 10:34:00 -07007195{
Jesse Barnes4fc688c2012-11-02 11:14:01 -07007196 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
Ben Widawsky42c05262012-09-26 10:34:00 -07007197
7198 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7199 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7200 return -EAGAIN;
7201 }
7202
7203 I915_WRITE(GEN6_PCODE_DATA, val);
7204 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7205
7206 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
7207 500)) {
7208 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7209 return -ETIMEDOUT;
7210 }
7211
7212 I915_WRITE(GEN6_PCODE_DATA, 0);
7213
7214 return 0;
7215}
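/*
 * Hypothetical call site (not taken from this file) showing how the
 * pcode mailbox helpers above are meant to be used; the mailbox chosen
 * here, GEN6_PCODE_READ_RC6VIDS, is just one example and the error
 * handling is a minimal sketch:
 *
 *	u32 rc6vids = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 *				   &rc6vids))
 *		DRM_DEBUG_DRIVER("RC6VIDS read failed\n");
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */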
Jesse Barnesa0e4e192013-04-02 11:23:05 -07007216
Ville Syrjälädd06f882014-11-10 22:55:12 +02007217static int vlv_gpu_freq_div(unsigned int czclk_freq)
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007218{
Ville Syrjälädd06f882014-11-10 22:55:12 +02007219 switch (czclk_freq) {
7220 case 200:
7221 return 10;
7222 case 267:
7223 return 12;
7224 case 320:
7225 case 333:
Ville Syrjälädd06f882014-11-10 22:55:12 +02007226 return 16;
Ville Syrjäläab3fb152014-11-10 22:55:15 +02007227 case 400:
7228 return 20;
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007229 default:
7230 return -1;
7231 }
Ville Syrjälädd06f882014-11-10 22:55:12 +02007232}
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007233
Ville Syrjälädd06f882014-11-10 22:55:12 +02007234static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7235{
7236 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7237
7238 div = vlv_gpu_freq_div(czclk_freq);
7239 if (div < 0)
7240 return div;
7241
7242 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007243}
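/*
 * Worked example with assumed numbers: for mem_freq = 1333 the CZ clock
 * comes out as DIV_ROUND_CLOSEST(1333, 4) = 333, vlv_gpu_freq_div()
 * returns 16, and an opcode of 0xc5 maps to
 * DIV_ROUND_CLOSEST(333 * (0xc5 + 6 - 0xbd), 16) = 291 MHz.
 */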
7244
Fengguang Wub55dd642014-07-12 11:21:39 +02007245static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007246{
Ville Syrjälädd06f882014-11-10 22:55:12 +02007247 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007248
Ville Syrjälädd06f882014-11-10 22:55:12 +02007249 mul = vlv_gpu_freq_div(czclk_freq);
7250 if (mul < 0)
7251 return mul;
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007252
Ville Syrjälädd06f882014-11-10 22:55:12 +02007253 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
Jesse Barnes855ba3b2013-04-17 15:54:57 -07007254}
7255
Fengguang Wub55dd642014-07-12 11:21:39 +02007256static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
Deepak S22b1b2f2014-07-12 14:54:33 +05307257{
Ville Syrjälädd06f882014-11-10 22:55:12 +02007258 int div, czclk_freq = dev_priv->rps.cz_freq;
Deepak S22b1b2f2014-07-12 14:54:33 +05307259
Ville Syrjälädd06f882014-11-10 22:55:12 +02007260 div = vlv_gpu_freq_div(czclk_freq) / 2;
7261 if (div < 0)
7262 return div;
Deepak S22b1b2f2014-07-12 14:54:33 +05307263
Ville Syrjälädd06f882014-11-10 22:55:12 +02007264 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
Deepak S22b1b2f2014-07-12 14:54:33 +05307265}
7266
Fengguang Wub55dd642014-07-12 11:21:39 +02007267static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
Deepak S22b1b2f2014-07-12 14:54:33 +05307268{
Ville Syrjälädd06f882014-11-10 22:55:12 +02007269 int mul, czclk_freq = dev_priv->rps.cz_freq;
Deepak S22b1b2f2014-07-12 14:54:33 +05307270
Ville Syrjälädd06f882014-11-10 22:55:12 +02007271 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7272 if (mul < 0)
7273 return mul;
Deepak S22b1b2f2014-07-12 14:54:33 +05307274
Ville Syrjälä1c147622014-08-18 14:42:43 +03007275 /* CHV needs even values */
Ville Syrjälädd06f882014-11-10 22:55:12 +02007276 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
Deepak S22b1b2f2014-07-12 14:54:33 +05307277}
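/*
 * Example with assumed numbers: for czclk_freq = 400, vlv_gpu_freq_div()
 * returns 20, so mul = 10, and val = 300 (MHz) becomes
 * DIV_ROUND_CLOSEST(300 * 2 * 10, 400) * 2 = 30; the trailing "* 2" is
 * what guarantees the even opcode that CHV requires.
 */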
7278
Ville Syrjälä616bc822015-01-23 21:04:25 +02007279int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
7280{
Akash Goel80b6dda2015-03-06 11:07:15 +05307281 if (IS_GEN9(dev_priv->dev))
7282 return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
7283 else if (IS_CHERRYVIEW(dev_priv->dev))
Ville Syrjälä616bc822015-01-23 21:04:25 +02007284 return chv_gpu_freq(dev_priv, val);
7285 else if (IS_VALLEYVIEW(dev_priv->dev))
7286 return byt_gpu_freq(dev_priv, val);
7287 else
7288 return val * GT_FREQUENCY_MULTIPLIER;
7289}
7290
Ville Syrjälä616bc822015-01-23 21:04:25 +02007291int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7292{
Akash Goel80b6dda2015-03-06 11:07:15 +05307293 if (IS_GEN9(dev_priv->dev))
7294 return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
7295 else if (IS_CHERRYVIEW(dev_priv->dev))
Ville Syrjälä616bc822015-01-23 21:04:25 +02007296 return chv_freq_opcode(dev_priv, val);
Deepak S22b1b2f2014-07-12 14:54:33 +05307297 else if (IS_VALLEYVIEW(dev_priv->dev))
Ville Syrjälä616bc822015-01-23 21:04:25 +02007298 return byt_freq_opcode(dev_priv, val);
7299 else
7300 return val / GT_FREQUENCY_MULTIPLIER;
Deepak S22b1b2f2014-07-12 14:54:33 +05307301}
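/*
 * On the non-VLV/CHV paths these two helpers are inverses built around
 * GT_FREQUENCY_MULTIPLIER (50 MHz per opcode step at the time of
 * writing), so e.g. opcode 16 corresponds to 800 MHz and
 * intel_freq_opcode(dev_priv, intel_gpu_freq(dev_priv, 16)) == 16; on
 * gen9 the extra GEN9_FREQ_SCALER divides the step down further.
 */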
7302
Chris Wilson6ad790c2015-04-07 16:20:31 +01007303struct request_boost {
7304 struct work_struct work;
Daniel Vettereed29a52015-05-21 14:21:25 +02007305 struct drm_i915_gem_request *req;
Chris Wilson6ad790c2015-04-07 16:20:31 +01007306};
7307
7308static void __intel_rps_boost_work(struct work_struct *work)
7309{
7310 struct request_boost *boost = container_of(work, struct request_boost, work);
Chris Wilsone61b9952015-04-27 13:41:24 +01007311 struct drm_i915_gem_request *req = boost->req;
Chris Wilson6ad790c2015-04-07 16:20:31 +01007312
Chris Wilsone61b9952015-04-27 13:41:24 +01007313 if (!i915_gem_request_completed(req, true))
7314 gen6_rps_boost(to_i915(req->ring->dev), NULL,
7315 req->emitted_jiffies);
Chris Wilson6ad790c2015-04-07 16:20:31 +01007316
Chris Wilsone61b9952015-04-27 13:41:24 +01007317 i915_gem_request_unreference__unlocked(req);
Chris Wilson6ad790c2015-04-07 16:20:31 +01007318 kfree(boost);
7319}
7320
7321void intel_queue_rps_boost_for_request(struct drm_device *dev,
Daniel Vettereed29a52015-05-21 14:21:25 +02007322 struct drm_i915_gem_request *req)
Chris Wilson6ad790c2015-04-07 16:20:31 +01007323{
7324 struct request_boost *boost;
7325
Daniel Vettereed29a52015-05-21 14:21:25 +02007326 if (req == NULL || INTEL_INFO(dev)->gen < 6)
Chris Wilson6ad790c2015-04-07 16:20:31 +01007327 return;
7328
Chris Wilsone61b9952015-04-27 13:41:24 +01007329 if (i915_gem_request_completed(req, true))
7330 return;
7331
Chris Wilson6ad790c2015-04-07 16:20:31 +01007332 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
7333 if (boost == NULL)
7334 return;
7335
Daniel Vettereed29a52015-05-21 14:21:25 +02007336 i915_gem_request_reference(req);
7337 boost->req = req;
Chris Wilson6ad790c2015-04-07 16:20:31 +01007338
7339 INIT_WORK(&boost->work, __intel_rps_boost_work);
7340 queue_work(to_i915(dev)->wq, &boost->work);
7341}
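/*
 * The GFP_ATOMIC in intel_queue_rps_boost_for_request() is presumably
 * deliberate: the boost may be queued from contexts that cannot sleep,
 * so a failed allocation is simply treated as "no boost". The work item
 * above drops the request reference taken here once it has run.
 */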
7342
Daniel Vetterf742a552013-12-06 10:17:53 +01007343void intel_pm_setup(struct drm_device *dev)
Chris Wilson907b28c2013-07-19 20:36:52 +01007344{
7345 struct drm_i915_private *dev_priv = dev->dev_private;
7346
Daniel Vetterf742a552013-12-06 10:17:53 +01007347 mutex_init(&dev_priv->rps.hw_lock);
Chris Wilson8d3afd72015-05-21 21:01:47 +01007348 spin_lock_init(&dev_priv->rps.client_lock);
Daniel Vetterf742a552013-12-06 10:17:53 +01007349
Chris Wilson907b28c2013-07-19 20:36:52 +01007350 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7351 intel_gen6_powersave_work);
Chris Wilson1854d5c2015-04-07 16:20:32 +01007352 INIT_LIST_HEAD(&dev_priv->rps.clients);
Chris Wilson2e1b8732015-04-27 13:41:22 +01007353 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
7354 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
Paulo Zanoni5d584b22014-03-07 20:08:15 -03007355
Paulo Zanoni33688d92014-03-07 20:08:19 -03007356 dev_priv->pm.suspended = false;
Chris Wilson907b28c2013-07-19 20:36:52 +01007357}