/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, going as low as 0V while in this stage.
 * This stage is entered automatically when the GPU is idle and RC6
 * support is enabled, and the GPU wakes up automatically as soon as a
 * new workload arrives.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage
 * consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6
 * state, and RC6pp is the deepest RC6 state. Their support by hardware
 * varies according to the GPU, BIOS, chipset and platform. RC6 is usually
 * the safest one and the one which brings the most power savings; deeper
 * states save more power, but require a higher latency to switch to and
 * wake up from.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

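/*
 * Illustration (hypothetical, not tied to any specific SKU): a platform
 * that can use RC6 and deep RC6 but not RC6pp would express that policy
 * as the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. 0x3.
 */
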
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);
}

static void skl_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	if (INTEL_REVID(dev) <= SKL_REVID_B0) {
		/*
		 * WaDisableSDEUnitClockGating:skl
		 * WaSetGAPSunitClckGateDisable:skl
		 */
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

		/* WaDisableVFUnitClockGating:skl */
		I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
			   GEN6_VFUNIT_CLOCK_GATE_DISABLE);
	}

	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);

		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/*
	 * GEN8_L3SQCREG4 has a dependency on the WA batch, so any new
	 * changes involving this register should also be added to the WA
	 * batch as required.
	 */
	if (INTEL_REVID(dev) <= SKL_REVID_E0)
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/*
	 * FIXME:
	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/* FIXME: apply on A0 only */
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

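/*
 * Each row above is, in order: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * followed by four latencies which (judging by how pineview_update_wm
 * consumes them) are display_sr, display_hpll_disable, cursor_sr and
 * cursor_hpll_disable, all in nanoseconds.
 */
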
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

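/*
 * Example of the macro above (illustrative value): FW_WM(0x25, SR)
 * expands to ((0x25 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. it shifts
 * the watermark into the SR field of the register and masks it to that
 * field's width.
 */
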
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

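/*
 * Worked example (hypothetical register values): with dsparb = 0x3040
 * and dsparb2 = 0x1, VLV_FIFO_START(dsparb, dsparb2, 0, 0) yields
 * 0x40 | (0x1 << 8) = 0x140, i.e. sprite0 starts at FIFO entry 320.
 * The low byte and the extra high bit together form a 9-bit boundary.
 */
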
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: available FIFO size
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

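/*
 * Worked example with made-up numbers: at a 100000 kHz pixel clock and
 * 4 bytes per pixel, a 5000 ns latency drains
 * (100000/1000) * 4 * 5000 / 1000 = 2000 bytes, i.e.
 * DIV_ROUND_UP(2000, 64) = 32 cachelines with a 64-byte FIFO line; with
 * a (hypothetical) 96-entry FIFO and a guard size of 2 the watermark
 * comes out at 96 - (32 + 2) = 62.
 */
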
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}

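/*
 * Illustration of the "small buffer" estimate used above (made-up
 * numbers): at a 100000 kHz clock and 4 bytes/pixel the display drains
 * (100000 * 4 / 1000) = 400 bytes per microsecond, so a 5000 ns latency
 * costs 400 * 5000 / 1000 = 2000 bytes before any TLB-miss adjustment.
 */
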
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
	CHV_WM_NUM_LEVELS,
	VLV_WM_NUM_LEVELS = 1,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int bytes_per_pixel,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

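/*
 * Worked example for the method-2 formula above (illustrative numbers):
 * pixel_rate = 148500 kHz, pipe_htotal = 2200, horiz_pixels = 1920,
 * 4 bytes/pixel, latency = 30 (i.e. 3 us). (30 * 148500) / (2200 * 10000)
 * truncates to 0 full lines, so (0 + 1) * 1920 * 4 = 7680 bytes, and
 * DIV_ROUND_UP(7680, 64) = 120 FIFO lines of data fetched during the
 * latency window.
 */
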
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, pixel_size, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}

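/*
 * Example of the split above (hypothetical config): with a primary plane
 * at 4 bytes/pixel and one sprite at 2 bytes/pixel both visible,
 * total_rate is 6, so the primary gets 511 * 4 / 6 = 340 entries and the
 * sprite 511 * 2 / 6 = 170; the single leftover entry is handed out by
 * the "spread the remainder" loop.
 */
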
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

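/*
 * Note on the inversion above: the values computed earlier represent the
 * FIFO space consumed during the latency window, while the hardware
 * registers expect the watermark expressed from the other end of the
 * FIFO, hence each value is subtracted from the plane's FIFO size (and
 * the SR values from the total FIFO size).
 */
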
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	if (IS_CHERRYVIEW(dev))
		wm_state->num_levels = CHV_WM_NUM_LEVELS;
	else
		wm_state->num_levels = VLV_WM_NUM_LEVELS;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			/* only one cursor plane, so no min() needed */
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	if (IS_CHERRYVIEW(dev))
		wm->level = VLV_WM_LEVEL_DDR_DVFS;
	else
		wm->level = VLV_WM_LEVEL_PM2;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

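/*
 * Note: the merge above is deliberately pessimistic: the allowed memory
 * power state (PM2/PM5/DDR DVFS) is only as deep as the most constrained
 * active pipe permits, and maxfifo/cxsr additionally requires exactly
 * one active CRTC.
 */
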
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

Ville Syrjälä46ba6142013-09-10 11:40:40 +03001483static void i965_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001484{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001485 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001486 struct drm_i915_private *dev_priv = dev->dev_private;
1487 struct drm_crtc *crtc;
1488 int srwm = 1;
1489 int cursor_sr = 16;
Imre Deak98584252014-06-13 14:54:20 +03001490 bool cxsr_enabled;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001491
1492 /* Calculate SR entries for single-plane configs */
1493 crtc = single_enabled_crtc(dev);
1494 if (crtc) {
1495 /* self-refresh has much higher latency */
1496 static const int sr_latency_ns = 12000;
Ville Syrjälä4fe85902013-09-04 18:25:22 +03001497 const struct drm_display_mode *adjusted_mode =
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001498 &to_intel_crtc(crtc)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001499 int clock = adjusted_mode->crtc_clock;
Jesse Barnesfec8cba2013-11-27 11:10:26 -08001500 int htotal = adjusted_mode->crtc_htotal;
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001501 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
Matt Roper59bea882015-02-27 10:12:01 -08001502 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001503 unsigned long line_time_us;
1504 int entries;
1505
Ville Syrjälä922044c2014-02-14 14:18:57 +02001506 line_time_us = max(htotal * 1000 / clock, 1);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001507
1508 /* Use ns/us then divide to preserve precision */
1509 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1510 pixel_size * hdisplay;
1511 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
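		/*
		 * Hypothetical example: htotal = 2200 at 148500 kHz gives
		 * line_time_us = 14, so 12000 ns of latency spans
		 * (12000 / 14 + 1000) / 1000 = 1 full line, and with
		 * hdisplay = 1920 at 4 bytes per pixel that is 7680 bytes,
		 * i.e. 120 FIFO lines subtracted from the FIFO size below.
		 */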
1512 srwm = I965_FIFO_SIZE - entries;
1513 if (srwm < 0)
1514 srwm = 1;
1515 srwm &= 0x1ff;
1516 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1517 entries, srwm);
1518
1519 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
Matt Roper3dd512f2015-02-27 10:12:00 -08001520 pixel_size * crtc->cursor->state->crtc_w;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001521 entries = DIV_ROUND_UP(entries,
1522 i965_cursor_wm_info.cacheline_size);
1523 cursor_sr = i965_cursor_wm_info.fifo_size -
1524 (entries + i965_cursor_wm_info.guard_size);
1525
1526 if (cursor_sr > i965_cursor_wm_info.max_wm)
1527 cursor_sr = i965_cursor_wm_info.max_wm;
1528
1529 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1530 "cursor %d\n", srwm, cursor_sr);
1531
Imre Deak98584252014-06-13 14:54:20 +03001532 cxsr_enabled = true;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001533 } else {
Imre Deak98584252014-06-13 14:54:20 +03001534 cxsr_enabled = false;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001535 /* Turn off self refresh if both pipes are enabled */
Imre Deak5209b1f2014-07-01 12:36:17 +03001536 intel_set_memory_cxsr(dev_priv, false);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001537 }
1538
1539 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1540 srwm);
1541
1542 /* 965 has limitations... */
Ville Syrjäläf4998962015-03-10 17:02:21 +02001543 I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
1544 FW_WM(8, CURSORB) |
1545 FW_WM(8, PLANEB) |
1546 FW_WM(8, PLANEA));
1547 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
1548 FW_WM(8, PLANEC_OLD));
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001549 /* update cursor SR watermark */
Ville Syrjäläf4998962015-03-10 17:02:21 +02001550 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
Imre Deak98584252014-06-13 14:54:20 +03001551
1552 if (cxsr_enabled)
1553 intel_set_memory_cxsr(dev_priv, true);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001554}
1555
Ville Syrjäläf4998962015-03-10 17:02:21 +02001556#undef FW_WM
1557
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001558static void i9xx_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001559{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001560 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 const struct intel_watermark_params *wm_info;
1563 uint32_t fwater_lo;
1564 uint32_t fwater_hi;
1565 int cwm, srwm = 1;
1566 int fifo_size;
1567 int planea_wm, planeb_wm;
1568 struct drm_crtc *crtc, *enabled = NULL;
1569
1570 if (IS_I945GM(dev))
1571 wm_info = &i945_wm_info;
1572 else if (!IS_GEN2(dev))
1573 wm_info = &i915_wm_info;
1574 else
Ville Syrjälä9d539102014-08-15 01:21:53 +03001575 wm_info = &i830_a_wm_info;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001576
1577 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1578 crtc = intel_get_crtc_for_plane(dev, 0);
Chris Wilson3490ea52013-01-07 10:11:40 +00001579 if (intel_crtc_active(crtc)) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001580 const struct drm_display_mode *adjusted_mode;
Matt Roper59bea882015-02-27 10:12:01 -08001581 int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001582 if (IS_GEN2(dev))
1583 cpp = 4;
1584
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001585 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001586 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001587 wm_info, fifo_size, cpp,
Chris Wilson5aef6002014-09-03 11:56:07 +01001588 pessimal_latency_ns);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001589 enabled = crtc;
Ville Syrjälä9d539102014-08-15 01:21:53 +03001590 } else {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001591 planea_wm = fifo_size - wm_info->guard_size;
Ville Syrjälä9d539102014-08-15 01:21:53 +03001592 if (planea_wm > (long)wm_info->max_wm)
1593 planea_wm = wm_info->max_wm;
1594 }
1595
1596 if (IS_GEN2(dev))
1597 wm_info = &i830_bc_wm_info;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001598
1599 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1600 crtc = intel_get_crtc_for_plane(dev, 1);
Chris Wilson3490ea52013-01-07 10:11:40 +00001601 if (intel_crtc_active(crtc)) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001602 const struct drm_display_mode *adjusted_mode;
Matt Roper59bea882015-02-27 10:12:01 -08001603 int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001604 if (IS_GEN2(dev))
1605 cpp = 4;
1606
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001607 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001608 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
Chris Wilsonb9e0bda2012-10-22 12:32:15 +01001609 wm_info, fifo_size, cpp,
Chris Wilson5aef6002014-09-03 11:56:07 +01001610 pessimal_latency_ns);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001611 if (enabled == NULL)
1612 enabled = crtc;
1613 else
1614 enabled = NULL;
Ville Syrjälä9d539102014-08-15 01:21:53 +03001615 } else {
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001616 planeb_wm = fifo_size - wm_info->guard_size;
Ville Syrjälä9d539102014-08-15 01:21:53 +03001617 if (planeb_wm > (long)wm_info->max_wm)
1618 planeb_wm = wm_info->max_wm;
1619 }
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001620
1621 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1622
Daniel Vetter2ab1bc92014-04-07 08:54:21 +02001623 if (IS_I915GM(dev) && enabled) {
Matt Roper2ff8fde2014-07-08 07:50:07 -07001624 struct drm_i915_gem_object *obj;
Daniel Vetter2ab1bc92014-04-07 08:54:21 +02001625
Matt Roper59bea882015-02-27 10:12:01 -08001626 obj = intel_fb_obj(enabled->primary->state->fb);
Daniel Vetter2ab1bc92014-04-07 08:54:21 +02001627
1628 /* self-refresh seems busted with untiled */
Matt Roper2ff8fde2014-07-08 07:50:07 -07001629 if (obj->tiling_mode == I915_TILING_NONE)
Daniel Vetter2ab1bc92014-04-07 08:54:21 +02001630 enabled = NULL;
1631 }
1632
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001633 /*
1634 * Overlay gets an aggressive default since video jitter is bad.
1635 */
1636 cwm = 2;
1637
1638 /* Play safe and disable self-refresh before adjusting watermarks. */
Imre Deak5209b1f2014-07-01 12:36:17 +03001639 intel_set_memory_cxsr(dev_priv, false);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001640
1641 /* Calculate SR entries for single-plane configs */
1642 if (HAS_FW_BLC(dev) && enabled) {
1643 /* self-refresh has much higher latency */
1644 static const int sr_latency_ns = 6000;
Ville Syrjälä4fe85902013-09-04 18:25:22 +03001645 const struct drm_display_mode *adjusted_mode =
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001646 &to_intel_crtc(enabled)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001647 int clock = adjusted_mode->crtc_clock;
Jesse Barnesfec8cba2013-11-27 11:10:26 -08001648 int htotal = adjusted_mode->crtc_htotal;
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001649 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
Matt Roper59bea882015-02-27 10:12:01 -08001650 int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001651 unsigned long line_time_us;
1652 int entries;
1653
Ville Syrjälä922044c2014-02-14 14:18:57 +02001654 line_time_us = max(htotal * 1000 / clock, 1);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001655
1656 /* Use ns/us then divide to preserve precision */
1657 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1658 pixel_size * hdisplay;
1659 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1660 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1661 srwm = wm_info->fifo_size - entries;
1662 if (srwm < 0)
1663 srwm = 1;
1664
1665 if (IS_I945G(dev) || IS_I945GM(dev))
1666 I915_WRITE(FW_BLC_SELF,
1667 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1668 else if (IS_I915GM(dev))
1669 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1670 }
1671
1672 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1673 planea_wm, planeb_wm, cwm, srwm);
1674
1675 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1676 fwater_hi = (cwm & 0x1f);
1677
1678 /* Set request length to 8 cachelines per fetch */
1679 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1680 fwater_hi = fwater_hi | (1 << 8);
1681
1682 I915_WRITE(FW_BLC, fwater_lo);
1683 I915_WRITE(FW_BLC2, fwater_hi);
1684
Imre Deak5209b1f2014-07-01 12:36:17 +03001685 if (enabled)
1686 intel_set_memory_cxsr(dev_priv, true);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001687}
1688
Daniel Vetterfeb56b92013-12-14 20:38:30 -02001689static void i845_update_wm(struct drm_crtc *unused_crtc)
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001690{
Ville Syrjälä46ba6142013-09-10 11:40:40 +03001691 struct drm_device *dev = unused_crtc->dev;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001692 struct drm_i915_private *dev_priv = dev->dev_private;
1693 struct drm_crtc *crtc;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001694 const struct drm_display_mode *adjusted_mode;
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001695 uint32_t fwater_lo;
1696 int planea_wm;
1697
1698 crtc = single_enabled_crtc(dev);
1699 if (crtc == NULL)
1700 return;
1701
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001702 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
Damien Lespiau241bfc32013-09-25 16:45:37 +01001703 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
Daniel Vetterfeb56b92013-12-14 20:38:30 -02001704 &i845_wm_info,
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001705 dev_priv->display.get_fifo_size(dev, 0),
Chris Wilson5aef6002014-09-03 11:56:07 +01001706 4, pessimal_latency_ns);
Eugeni Dodonovb445e3b2012-04-16 22:20:35 -03001707 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1708 fwater_lo |= (3<<8) | planea_wm;
1709
1710 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1711
1712 I915_WRITE(FW_BLC, fwater_lo);
1713}
1714
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03001715uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001716{
Chris Wilsonfd4daa92013-08-27 17:04:17 +01001717 uint32_t pixel_rate;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001718
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03001719 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001720
1721 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1722 * adjust the pixel_rate here. */
1723
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03001724 if (pipe_config->pch_pfit.enabled) {
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001725 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03001726 uint32_t pfit_size = pipe_config->pch_pfit.size;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001727
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03001728 pipe_w = pipe_config->pipe_src_w;
1729 pipe_h = pipe_config->pipe_src_h;
1730
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001731 pfit_w = (pfit_size >> 16) & 0xFFFF;
1732 pfit_h = pfit_size & 0xFFFF;
1733 if (pipe_w < pfit_w)
1734 pipe_w = pfit_w;
1735 if (pipe_h < pfit_h)
1736 pipe_h = pfit_h;
1737
1738 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1739 pfit_w * pfit_h);
1740 }
1741
1742 return pixel_rate;
1743}
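/*
 * Hypothetical example for the panel fitter correction above: a 1920x1080
 * source downscaled to a 1280x720 pfit window scales the pixel rate by
 * (1920 * 1080) / (1280 * 720) = 2.25, since the pipe must fetch source
 * pixels faster than the adjusted mode clock alone implies. For upscaling,
 * the pipe dimensions are first clamped up to the pfit window, so the
 * ratio never drops below 1.
 */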
1744
Ville Syrjälä37126462013-08-01 16:18:55 +03001745/* latency must be in 0.1us units. */
Ville Syrjälä23297042013-07-05 11:57:17 +03001746static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001747 uint32_t latency)
1748{
1749 uint64_t ret;
1750
Ville Syrjälä3312ba62013-08-01 16:18:53 +03001751 if (WARN(latency == 0, "Latency value missing\n"))
1752 return UINT_MAX;
1753
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001754 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
1755 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1756
1757 return ret;
1758}
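/*
 * Worked example for the method 1 formula above (hypothetical numbers):
 * pixel_rate = 148500 kHz, 4 bytes per pixel and latency = 7 (0.7us in
 * 0.1us units) give 148500 * 4 * 7 = 4158000, and
 * DIV_ROUND_UP(4158000, 64 * 10000) + 2 = 9, i.e. nine 64-byte blocks
 * of FIFO are needed to cover the memory latency.
 */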
1759
Ville Syrjälä37126462013-08-01 16:18:55 +03001760/* latency must be in 0.1us units. */
Ville Syrjälä23297042013-07-05 11:57:17 +03001761static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001762 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
1763 uint32_t latency)
1764{
1765 uint32_t ret;
1766
Ville Syrjälä3312ba62013-08-01 16:18:53 +03001767 if (WARN(latency == 0, "Latency value missing\n"))
1768 return UINT_MAX;
1769
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001770 ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1771 ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
1772 ret = DIV_ROUND_UP(ret, 64) + 2;
1773 return ret;
1774}
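/*
 * Worked example for the method 2 formula above, with the same
 * hypothetical mode: latency = 7, pixel_rate = 148500 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, 4 bytes per pixel. A line
 * takes ~14.8us, so the latency covers no complete line:
 * (7 * 148500) / (2200 * 10000) = 0, (0 + 1) * 1920 * 4 = 7680 bytes,
 * DIV_ROUND_UP(7680, 64) + 2 = 122 blocks. The callers below then take
 * min(method1, method2) where both are defined.
 */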
1775
Ville Syrjälä23297042013-07-05 11:57:17 +03001776static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
Paulo Zanonicca32e92013-05-31 11:45:06 -03001777 uint8_t bytes_per_pixel)
1778{
1779 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1780}
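/*
 * Continuing the hypothetical numbers: a primary watermark of 9 blocks
 * is 9 * 64 = 576 bytes, giving DIV_ROUND_UP(576, 1920 * 4) + 2 = 3 as
 * the FBC watermark value.
 */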
1781
Pradeep Bhat2ac96d22014-11-04 17:06:40 +00001782struct skl_pipe_wm_parameters {
1783 bool active;
1784 uint32_t pipe_htotal;
1785 uint32_t pixel_rate; /* in KHz */
1786 struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
1787 struct intel_plane_wm_parameters cursor;
1788};
1789
Imre Deak820c1982013-12-17 14:46:36 +02001790struct ilk_pipe_wm_parameters {
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001791 bool active;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001792 uint32_t pipe_htotal;
1793 uint32_t pixel_rate;
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001794 struct intel_plane_wm_parameters pri;
1795 struct intel_plane_wm_parameters spr;
1796 struct intel_plane_wm_parameters cur;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001797};
1798
Imre Deak820c1982013-12-17 14:46:36 +02001799struct ilk_wm_maximums {
Paulo Zanonicca32e92013-05-31 11:45:06 -03001800 uint16_t pri;
1801 uint16_t spr;
1802 uint16_t cur;
1803 uint16_t fbc;
1804};
1805
Ville Syrjälä240264f2013-08-07 13:29:12 +03001806/* used in computing the new watermarks state */
1807struct intel_wm_config {
1808 unsigned int num_pipes_active;
1809 bool sprites_enabled;
1810 bool sprites_scaled;
Ville Syrjälä240264f2013-08-07 13:29:12 +03001811};
1812
Ville Syrjälä37126462013-08-01 16:18:55 +03001813/*
1814 * For both WM_PIPE and WM_LP.
1815 * mem_value must be in 0.1us units.
1816 */
Imre Deak820c1982013-12-17 14:46:36 +02001817static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
Paulo Zanonicca32e92013-05-31 11:45:06 -03001818 uint32_t mem_value,
1819 bool is_lp)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001820{
Paulo Zanonicca32e92013-05-31 11:45:06 -03001821 uint32_t method1, method2;
1822
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001823 if (!params->active || !params->pri.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001824 return 0;
1825
Ville Syrjälä23297042013-07-05 11:57:17 +03001826 method1 = ilk_wm_method1(params->pixel_rate,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001827 params->pri.bytes_per_pixel,
Paulo Zanonicca32e92013-05-31 11:45:06 -03001828 mem_value);
1829
1830 if (!is_lp)
1831 return method1;
1832
Ville Syrjälä23297042013-07-05 11:57:17 +03001833 method2 = ilk_wm_method2(params->pixel_rate,
Paulo Zanonicca32e92013-05-31 11:45:06 -03001834 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001835 params->pri.horiz_pixels,
1836 params->pri.bytes_per_pixel,
Paulo Zanonicca32e92013-05-31 11:45:06 -03001837 mem_value);
1838
1839 return min(method1, method2);
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001840}
1841
Ville Syrjälä37126462013-08-01 16:18:55 +03001842/*
1843 * For both WM_PIPE and WM_LP.
1844 * mem_value must be in 0.1us units.
1845 */
Imre Deak820c1982013-12-17 14:46:36 +02001846static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001847 uint32_t mem_value)
1848{
1849 uint32_t method1, method2;
1850
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001851 if (!params->active || !params->spr.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001852 return 0;
1853
Ville Syrjälä23297042013-07-05 11:57:17 +03001854 method1 = ilk_wm_method1(params->pixel_rate,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001855 params->spr.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001856 mem_value);
Ville Syrjälä23297042013-07-05 11:57:17 +03001857 method2 = ilk_wm_method2(params->pixel_rate,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001858 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001859 params->spr.horiz_pixels,
1860 params->spr.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001861 mem_value);
1862 return min(method1, method2);
1863}
1864
Ville Syrjälä37126462013-08-01 16:18:55 +03001865/*
1866 * For both WM_PIPE and WM_LP.
1867 * mem_value must be in 0.1us units.
1868 */
Imre Deak820c1982013-12-17 14:46:36 +02001869static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001870 uint32_t mem_value)
1871{
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001872 if (!params->active || !params->cur.enabled)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001873 return 0;
1874
Ville Syrjälä23297042013-07-05 11:57:17 +03001875 return ilk_wm_method2(params->pixel_rate,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001876 params->pipe_htotal,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001877 params->cur.horiz_pixels,
1878 params->cur.bytes_per_pixel,
Paulo Zanoni801bcff2013-05-31 10:08:35 -03001879 mem_value);
1880}
1881
Paulo Zanonicca32e92013-05-31 11:45:06 -03001882/* Only for WM_LP. */
Imre Deak820c1982013-12-17 14:46:36 +02001883static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
Ville Syrjälä1fda9882013-07-05 11:57:19 +03001884 uint32_t pri_val)
Paulo Zanonicca32e92013-05-31 11:45:06 -03001885{
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001886 if (!params->active || !params->pri.enabled)
Paulo Zanonicca32e92013-05-31 11:45:06 -03001887 return 0;
1888
Ville Syrjälä23297042013-07-05 11:57:17 +03001889 return ilk_wm_fbc(pri_val,
Ville Syrjäläc35426d2013-08-07 13:29:50 +03001890 params->pri.horiz_pixels,
1891 params->pri.bytes_per_pixel);
Paulo Zanonicca32e92013-05-31 11:45:06 -03001892}
1893
Ville Syrjälä158ae642013-08-07 13:28:19 +03001894static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1895{
Ville Syrjälä416f4722013-11-02 21:07:46 -07001896 if (INTEL_INFO(dev)->gen >= 8)
1897 return 3072;
1898 else if (INTEL_INFO(dev)->gen >= 7)
Ville Syrjälä158ae642013-08-07 13:28:19 +03001899 return 768;
1900 else
1901 return 512;
1902}
1903
Ville Syrjälä4e975082014-03-07 18:32:11 +02001904static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1905 int level, bool is_sprite)
1906{
1907 if (INTEL_INFO(dev)->gen >= 8)
1908 /* BDW primary/sprite plane watermarks */
1909 return level == 0 ? 255 : 2047;
1910 else if (INTEL_INFO(dev)->gen >= 7)
1911 /* IVB/HSW primary/sprite plane watermarks */
1912 return level == 0 ? 127 : 1023;
1913 else if (!is_sprite)
1914 /* ILK/SNB primary plane watermarks */
1915 return level == 0 ? 127 : 511;
1916 else
1917 /* ILK/SNB sprite plane watermarks */
1918 return level == 0 ? 63 : 255;
1919}
1920
1921static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1922 int level)
1923{
1924 if (INTEL_INFO(dev)->gen >= 7)
1925 return level == 0 ? 63 : 255;
1926 else
1927 return level == 0 ? 31 : 63;
1928}
1929
1930static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1931{
1932 if (INTEL_INFO(dev)->gen >= 8)
1933 return 31;
1934 else
1935 return 15;
1936}
1937
Ville Syrjälä158ae642013-08-07 13:28:19 +03001938/* Calculate the maximum primary/sprite plane watermark */
1939static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1940 int level,
Ville Syrjälä240264f2013-08-07 13:29:12 +03001941 const struct intel_wm_config *config,
Ville Syrjälä158ae642013-08-07 13:28:19 +03001942 enum intel_ddb_partitioning ddb_partitioning,
1943 bool is_sprite)
1944{
1945 unsigned int fifo_size = ilk_display_fifo_size(dev);
Ville Syrjälä158ae642013-08-07 13:28:19 +03001946
1947 /* if sprites aren't enabled, sprites get nothing */
Ville Syrjälä240264f2013-08-07 13:29:12 +03001948 if (is_sprite && !config->sprites_enabled)
Ville Syrjälä158ae642013-08-07 13:28:19 +03001949 return 0;
1950
1951 /* HSW allows LP1+ watermarks even with multiple pipes */
Ville Syrjälä240264f2013-08-07 13:29:12 +03001952 if (level == 0 || config->num_pipes_active > 1) {
Ville Syrjälä158ae642013-08-07 13:28:19 +03001953 fifo_size /= INTEL_INFO(dev)->num_pipes;
1954
1955 /*
1956 * For some reason the non-self-refresh
1957 * FIFO size is only half of the
1958 * self-refresh FIFO size on ILK/SNB.
1959 */
1960 if (INTEL_INFO(dev)->gen <= 6)
1961 fifo_size /= 2;
1962 }
1963
Ville Syrjälä240264f2013-08-07 13:29:12 +03001964 if (config->sprites_enabled) {
Ville Syrjälä158ae642013-08-07 13:28:19 +03001965 /* level 0 is always calculated with 1:1 split */
1966 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1967 if (is_sprite)
1968 fifo_size *= 5;
1969 fifo_size /= 6;
1970 } else {
1971 fifo_size /= 2;
1972 }
1973 }
1974
1975 /* clamp to max that the registers can hold */
Ville Syrjälä4e975082014-03-07 18:32:11 +02001976 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
Ville Syrjälä158ae642013-08-07 13:28:19 +03001977}
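/*
 * Hypothetical gen7 example: a 768 entry FIFO, one active pipe, sprites
 * enabled, LP1+ level. With 1/2 partitioning primary and sprite each get
 * 768 / 2 = 384 entries; with 5/6 partitioning the sprite gets
 * 768 * 5 / 6 = 640 and the primary 768 / 6 = 128, before the final
 * clamp to the register maximum.
 */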
1978
1979/* Calculate the maximum cursor plane watermark */
1980static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
Ville Syrjälä240264f2013-08-07 13:29:12 +03001981 int level,
1982 const struct intel_wm_config *config)
Ville Syrjälä158ae642013-08-07 13:28:19 +03001983{
1984 /* HSW LP1+ watermarks w/ multiple pipes */
Ville Syrjälä240264f2013-08-07 13:29:12 +03001985 if (level > 0 && config->num_pipes_active > 1)
Ville Syrjälä158ae642013-08-07 13:28:19 +03001986 return 64;
1987
1988 /* otherwise just report max that registers can hold */
Ville Syrjälä4e975082014-03-07 18:32:11 +02001989 return ilk_cursor_wm_reg_max(dev, level);
Ville Syrjälä158ae642013-08-07 13:28:19 +03001990}
1991
Damien Lespiaud34ff9c2014-01-06 19:17:23 +00001992static void ilk_compute_wm_maximums(const struct drm_device *dev,
Ville Syrjälä34982fe2013-10-09 19:18:09 +03001993 int level,
1994 const struct intel_wm_config *config,
1995 enum intel_ddb_partitioning ddb_partitioning,
Imre Deak820c1982013-12-17 14:46:36 +02001996 struct ilk_wm_maximums *max)
Ville Syrjälä158ae642013-08-07 13:28:19 +03001997{
Ville Syrjälä240264f2013-08-07 13:29:12 +03001998 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1999 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2000 max->cur = ilk_cursor_wm_max(dev, level, config);
Ville Syrjälä4e975082014-03-07 18:32:11 +02002001 max->fbc = ilk_fbc_wm_reg_max(dev);
Ville Syrjälä158ae642013-08-07 13:28:19 +03002002}
2003
Ville Syrjäläa3cb4042014-04-28 15:44:56 +03002004static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2005 int level,
2006 struct ilk_wm_maximums *max)
2007{
2008 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2009 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2010 max->cur = ilk_cursor_wm_reg_max(dev, level);
2011 max->fbc = ilk_fbc_wm_reg_max(dev);
2012}
2013
Ville Syrjäläd9395652013-10-09 19:18:10 +03002014static bool ilk_validate_wm_level(int level,
Imre Deak820c1982013-12-17 14:46:36 +02002015 const struct ilk_wm_maximums *max,
Ville Syrjäläd9395652013-10-09 19:18:10 +03002016 struct intel_wm_level *result)
Ville Syrjäläa9786a12013-08-07 13:24:47 +03002017{
2018 bool ret;
2019
2020 /* already determined to be invalid? */
2021 if (!result->enable)
2022 return false;
2023
2024 result->enable = result->pri_val <= max->pri &&
2025 result->spr_val <= max->spr &&
2026 result->cur_val <= max->cur;
2027
2028 ret = result->enable;
2029
2030 /*
2031 * HACK until we can pre-compute everything,
2032 * and thus fail gracefully if LP0 watermarks
2033 * are exceeded...
2034 */
2035 if (level == 0 && !result->enable) {
2036 if (result->pri_val > max->pri)
2037 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2038 level, result->pri_val, max->pri);
2039 if (result->spr_val > max->spr)
2040 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2041 level, result->spr_val, max->spr);
2042 if (result->cur_val > max->cur)
2043 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2044 level, result->cur_val, max->cur);
2045
2046 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2047 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2048 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2049 result->enable = true;
2050 }
2051
Ville Syrjäläa9786a12013-08-07 13:24:47 +03002052 return ret;
2053}
2054
Damien Lespiaud34ff9c2014-01-06 19:17:23 +00002055static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002056 int level,
Imre Deak820c1982013-12-17 14:46:36 +02002057 const struct ilk_pipe_wm_parameters *p,
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002058 struct intel_wm_level *result)
Ville Syrjälä6f5ddd12013-08-06 22:24:02 +03002059{
2060 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2061 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2062 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2063
2064 /* WM1+ latency values stored in 0.5us units */
2065 if (level > 0) {
2066 pri_latency *= 5;
2067 spr_latency *= 5;
2068 cur_latency *= 5;
2069 }
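	/*
	 * E.g. a raw WM1 latency of 4 (0.5us units, i.e. 2us) reaches the
	 * ilk_compute_*_wm() helpers as 20, matching the 0.1us units they
	 * expect.
	 */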
2070
2071 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2072 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2073 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2074 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2075 result->enable = true;
2076}
2077
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002078static uint32_t
2079hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002080{
2081 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002082 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002083 struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
Paulo Zanoni85a02de2013-05-03 17:23:43 -03002084 u32 linetime, ips_linetime;
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002085
Matt Roper3ef00282015-03-09 10:19:24 -07002086 if (!intel_crtc->active)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002087 return 0;
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002088
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002089 /* The watermarks are computed based on how long it takes to fill a
2090 * single row at the given clock rate, multiplied by 8.
2091 */
Jesse Barnesfec8cba2013-11-27 11:10:26 -08002092 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2093 mode->crtc_clock);
2094 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
Ville Syrjälä05024da2015-06-03 15:45:08 +03002095 dev_priv->cdclk_freq);
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002096
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002097 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2098 PIPE_WM_LINETIME_TIME(linetime);
Eugeni Dodonov1f8eeab2012-05-09 15:37:24 -03002099}
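/*
 * Hypothetical example: htotal = 2200 at a 148500 kHz pixel clock is a
 * ~14.8us line, programmed as DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500)
 * = 119 in 1/8 us units; the IPS variant repeats the calculation with
 * the CD clock in place of the pixel clock.
 */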
2100
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002101static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002102{
2103 struct drm_i915_private *dev_priv = dev->dev_private;
2104
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002105 if (IS_GEN9(dev)) {
2106 uint32_t val;
Vandana Kannan4f947382014-11-04 17:06:47 +00002107 int ret, i;
Vandana Kannan367294b2014-11-04 17:06:46 +00002108 int level, max_level = ilk_wm_max_level(dev);
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002109
2110 /* read the first set of memory latencies[0:3] */
2111 val = 0; /* data0 to be programmed to 0 for first set */
2112 mutex_lock(&dev_priv->rps.hw_lock);
2113 ret = sandybridge_pcode_read(dev_priv,
2114 GEN9_PCODE_READ_MEM_LATENCY,
2115 &val);
2116 mutex_unlock(&dev_priv->rps.hw_lock);
2117
2118 if (ret) {
2119 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2120 return;
2121 }
2122
2123 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2124 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2125 GEN9_MEM_LATENCY_LEVEL_MASK;
2126 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2127 GEN9_MEM_LATENCY_LEVEL_MASK;
2128 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2129 GEN9_MEM_LATENCY_LEVEL_MASK;
2130
2131 /* read the second set of memory latencies[4:7] */
2132 val = 1; /* data0 to be programmed to 1 for second set */
2133 mutex_lock(&dev_priv->rps.hw_lock);
2134 ret = sandybridge_pcode_read(dev_priv,
2135 GEN9_PCODE_READ_MEM_LATENCY,
2136 &val);
2137 mutex_unlock(&dev_priv->rps.hw_lock);
2138 if (ret) {
2139 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2140 return;
2141 }
2142
2143 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2144 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2145 GEN9_MEM_LATENCY_LEVEL_MASK;
2146 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2147 GEN9_MEM_LATENCY_LEVEL_MASK;
2148 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2149 GEN9_MEM_LATENCY_LEVEL_MASK;
2150
Vandana Kannan367294b2014-11-04 17:06:46 +00002151 /*
Damien Lespiau6f972352015-02-09 19:33:07 +00002152 * WaWmMemoryReadLatency:skl
2153 *
Vandana Kannan367294b2014-11-04 17:06:46 +00002154 * punit doesn't take into account the read latency so we need
2155 * to add 2us to the various latency levels we retrieve from
2156 * the punit.
2157 * - WM0 is a bit special in that it's the only level that
2158 * can't be disabled if we want to have display working, so
2159 * we always add 2us there.
2160 * - For levels >=1, punit returns 0us latency when they are
2161 * disabled, so we respect that and don't add 2us then.
Vandana Kannan4f947382014-11-04 17:06:47 +00002162 *
2163 * Additionally, if a level n (n >= 1) has a 0us latency, all
2164 * levels m (m >= n) need to be disabled. We make sure to
2165 * sanitize the values out of the punit to satisfy this
2166 * requirement.
Vandana Kannan367294b2014-11-04 17:06:46 +00002167 */
2168 wm[0] += 2;
2169 for (level = 1; level <= max_level; level++)
2170 if (wm[level] != 0)
2171 wm[level] += 2;
Vandana Kannan4f947382014-11-04 17:06:47 +00002172 else {
2173 for (i = level + 1; i <= max_level; i++)
2174 wm[i] = 0;
Vandana Kannan367294b2014-11-04 17:06:46 +00002175
Vandana Kannan4f947382014-11-04 17:06:47 +00002176 break;
2177 }
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002178 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002179 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2180
2181 wm[0] = (sskpd >> 56) & 0xFF;
2182 if (wm[0] == 0)
2183 wm[0] = sskpd & 0xF;
Ville Syrjäläe5d50192013-07-05 11:57:22 +03002184 wm[1] = (sskpd >> 4) & 0xFF;
2185 wm[2] = (sskpd >> 12) & 0xFF;
2186 wm[3] = (sskpd >> 20) & 0x1FF;
2187 wm[4] = (sskpd >> 32) & 0x1FF;
Ville Syrjälä63cf9a12013-07-05 11:57:23 +03002188 } else if (INTEL_INFO(dev)->gen >= 6) {
2189 uint32_t sskpd = I915_READ(MCH_SSKPD);
2190
2191 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2192 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2193 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2194 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
Ville Syrjälä3a88d0a2013-08-01 16:18:49 +03002195 } else if (INTEL_INFO(dev)->gen >= 5) {
2196 uint32_t mltr = I915_READ(MLTR_ILK);
2197
2198 /* ILK primary LP0 latency is 700 ns */
2199 wm[0] = 7;
2200 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2201 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
Ville Syrjälä12b134d2013-07-05 11:57:21 +03002202 }
2203}
2204
Ville Syrjälä53615a52013-08-01 16:18:50 +03002205static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2206{
2207 /* ILK sprite LP0 latency is 1300 ns */
2208 if (INTEL_INFO(dev)->gen == 5)
2209 wm[0] = 13;
2210}
2211
2212static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2213{
2214 /* ILK cursor LP0 latency is 1300 ns */
2215 if (INTEL_INFO(dev)->gen == 5)
2216 wm[0] = 13;
2217
2218 /* WaDoubleCursorLP3Latency:ivb */
2219 if (IS_IVYBRIDGE(dev))
2220 wm[3] *= 2;
2221}
2222
Damien Lespiau546c81f2014-05-13 15:30:26 +01002223int ilk_wm_max_level(const struct drm_device *dev)
Ville Syrjäläad0d6dc2013-08-30 14:30:25 +03002224{
2225 /* how many WM levels are we expecting */
Damien Lespiaub6e742f2015-05-09 02:05:55 +01002226 if (INTEL_INFO(dev)->gen >= 9)
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002227 return 7;
2228 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjäläad0d6dc2013-08-30 14:30:25 +03002229 return 4;
2230 else if (INTEL_INFO(dev)->gen >= 6)
2231 return 3;
2232 else
2233 return 2;
2234}
Daniel Vetter7526ed72014-09-29 15:07:19 +02002235
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002236static void intel_print_wm_latency(struct drm_device *dev,
2237 const char *name,
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002238 const uint16_t wm[8])
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002239{
Ville Syrjäläad0d6dc2013-08-30 14:30:25 +03002240 int level, max_level = ilk_wm_max_level(dev);
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002241
2242 for (level = 0; level <= max_level; level++) {
2243 unsigned int latency = wm[level];
2244
2245 if (latency == 0) {
2246 DRM_ERROR("%s WM%d latency not provided\n",
2247 name, level);
2248 continue;
2249 }
2250
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002251 /*
2252 * - latencies are in 1us units on gen9
2253 * - before then, WM1+ latency values are in 0.5us units
2254 */
2255 if (IS_GEN9(dev))
2256 latency *= 10;
2257 else if (level > 0)
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002258 latency *= 5;
2259
2260 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2261 name, level, wm[level],
2262 latency / 10, latency % 10);
2263 }
2264}
2265
Ville Syrjäläe95a2f72014-05-08 15:09:19 +03002266static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2267 uint16_t wm[5], uint16_t min)
2268{
2269 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2270
2271 if (wm[0] >= min)
2272 return false;
2273
2274 wm[0] = max(wm[0], min);
2275 for (level = 1; level <= max_level; level++)
2276 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2277
2278 return true;
2279}
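/*
 * For the 1.2us SNB quirk below: WM0 (0.1us units) is raised to at
 * least 12, and WM1+ (0.5us units) to at least DIV_ROUND_UP(12, 5) = 3,
 * i.e. 1.5us.
 */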
2280
2281static void snb_wm_latency_quirk(struct drm_device *dev)
2282{
2283 struct drm_i915_private *dev_priv = dev->dev_private;
2284 bool changed;
2285
2286 /*
2287 * The BIOS-provided WM memory latency values are often
2288 * inadequate for high-resolution displays. Adjust them.
2289 */
2290 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2291 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2292 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2293
2294 if (!changed)
2295 return;
2296
2297 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2298 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2299 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2300 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2301}
2302
Damien Lespiaufa50ad62014-03-17 18:01:16 +00002303static void ilk_setup_wm_latency(struct drm_device *dev)
Ville Syrjälä53615a52013-08-01 16:18:50 +03002304{
2305 struct drm_i915_private *dev_priv = dev->dev_private;
2306
2307 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2308
2309 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2310 sizeof(dev_priv->wm.pri_latency));
2311 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2312 sizeof(dev_priv->wm.pri_latency));
2313
2314 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2315 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
Ville Syrjälä26ec9712013-08-01 16:18:52 +03002316
2317 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2318 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2319 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
Ville Syrjäläe95a2f72014-05-08 15:09:19 +03002320
2321 if (IS_GEN6(dev))
2322 snb_wm_latency_quirk(dev);
Ville Syrjälä53615a52013-08-01 16:18:50 +03002323}
2324
Pradeep Bhat2af30a52014-11-04 17:06:38 +00002325static void skl_setup_wm_latency(struct drm_device *dev)
2326{
2327 struct drm_i915_private *dev_priv = dev->dev_private;
2328
2329 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2330 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2331}
2332
Imre Deak820c1982013-12-17 14:46:36 +02002333static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002334 struct ilk_pipe_wm_parameters *p)
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002335{
Ville Syrjälä7c4a3952013-10-09 19:17:56 +03002336 struct drm_device *dev = crtc->dev;
2337 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2338 enum pipe pipe = intel_crtc->pipe;
Ville Syrjälä7c4a3952013-10-09 19:17:56 +03002339 struct drm_plane *plane;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002340
Matt Roper3ef00282015-03-09 10:19:24 -07002341 if (!intel_crtc->active)
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002342 return;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002343
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002344 p->active = true;
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002345 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03002346 p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
Matt Roperc9f038a2015-03-09 11:06:02 -07002347
Thomas Gummerer54da6912015-05-14 09:16:39 +02002348 if (crtc->primary->state->fb)
Matt Roperc9f038a2015-03-09 11:06:02 -07002349 p->pri.bytes_per_pixel =
2350 crtc->primary->state->fb->bits_per_pixel / 8;
Thomas Gummerer54da6912015-05-14 09:16:39 +02002351 else
2352 p->pri.bytes_per_pixel = 4;
Matt Roperc9f038a2015-03-09 11:06:02 -07002353
Thomas Gummerer54da6912015-05-14 09:16:39 +02002354 p->cur.bytes_per_pixel = 4;
2355 /*
2356 * TODO: for now, assume primary and cursor planes are always enabled.
2357 * Setting them to false makes the screen flicker.
2358 */
2359 p->pri.enabled = true;
2360 p->cur.enabled = true;
2361
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002362 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
Matt Roper3dd512f2015-02-27 10:12:00 -08002363 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
Ville Syrjälä7c4a3952013-10-09 19:17:56 +03002364
Matt Roperaf2b6532014-04-01 15:22:32 -07002365 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002366 struct intel_plane *intel_plane = to_intel_plane(plane);
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002367
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002368 if (intel_plane->pipe == pipe) {
Ville Syrjälä7c4a3952013-10-09 19:17:56 +03002369 p->spr = intel_plane->wm;
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002370 break;
2371 }
2372 }
2373}
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002374
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002375static void ilk_compute_wm_config(struct drm_device *dev,
2376 struct intel_wm_config *config)
2377{
2378 struct intel_crtc *intel_crtc;
2379
2380 /* Compute the currently _active_ config */
Damien Lespiaud3fcc802014-05-13 23:32:22 +01002381 for_each_intel_crtc(dev, intel_crtc) {
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002382 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2383
2384 if (!wm->pipe_enabled)
2385 continue;
2386
2387 config->sprites_enabled |= wm->sprites_enabled;
2388 config->sprites_scaled |= wm->sprites_scaled;
2389 config->num_pipes_active++;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002390 }
2391}
2392
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002393/* Compute new watermarks for the pipe */
2394static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
Imre Deak820c1982013-12-17 14:46:36 +02002395 const struct ilk_pipe_wm_parameters *params,
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002396 struct intel_pipe_wm *pipe_wm)
2397{
2398 struct drm_device *dev = crtc->dev;
Damien Lespiaud34ff9c2014-01-06 19:17:23 +00002399 const struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002400 int level, max_level = ilk_wm_max_level(dev);
2401 /* LP0 watermark maximums depend on this pipe alone */
2402 struct intel_wm_config config = {
2403 .num_pipes_active = 1,
2404 .sprites_enabled = params->spr.enabled,
2405 .sprites_scaled = params->spr.scaled,
2406 };
Imre Deak820c1982013-12-17 14:46:36 +02002407 struct ilk_wm_maximums max;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002408
Ville Syrjälä2a44b762014-03-07 18:32:09 +02002409 pipe_wm->pipe_enabled = params->active;
2410 pipe_wm->sprites_enabled = params->spr.enabled;
2411 pipe_wm->sprites_scaled = params->spr.scaled;
2412
Ville Syrjälä7b39a0b2013-12-05 15:51:30 +02002413 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2414 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2415 max_level = 1;
2416
2417 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2418 if (params->spr.scaled)
2419 max_level = 0;
2420
Ville Syrjäläa3cb4042014-04-28 15:44:56 +03002421 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002422
Ville Syrjäläa42a5712014-01-07 16:14:08 +02002423 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjäläce0e0712013-12-05 15:51:36 +02002424 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002425
Ville Syrjäläa3cb4042014-04-28 15:44:56 +03002426 /* LP0 watermarks always use 1/2 DDB partitioning */
2427 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2428
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002429 /* At least LP0 must be valid */
Ville Syrjäläa3cb4042014-04-28 15:44:56 +03002430 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2431 return false;
2432
2433 ilk_compute_wm_reg_maximums(dev, 1, &max);
2434
2435 for (level = 1; level <= max_level; level++) {
2436 struct intel_wm_level wm = {};
2437
2438 ilk_compute_wm_level(dev_priv, level, params, &wm);
2439
2440 /*
2441 * Disable any watermark level that exceeds the
2442 * register maximums since such watermarks are
2443 * always invalid.
2444 */
2445 if (!ilk_validate_wm_level(level, &max, &wm))
2446 break;
2447
2448 pipe_wm->wm[level] = wm;
2449 }
2450
2451 return true;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002452}
2453
2454/*
2455 * Merge the watermarks from all active pipes for a specific level.
2456 */
2457static void ilk_merge_wm_level(struct drm_device *dev,
2458 int level,
2459 struct intel_wm_level *ret_wm)
2460{
2461 const struct intel_crtc *intel_crtc;
2462
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002463 ret_wm->enable = true;
2464
Damien Lespiaud3fcc802014-05-13 23:32:22 +01002465 for_each_intel_crtc(dev, intel_crtc) {
Ville Syrjäläfe392ef2014-03-07 18:32:10 +02002466 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2467 const struct intel_wm_level *wm = &active->wm[level];
2468
2469 if (!active->pipe_enabled)
2470 continue;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002471
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002472 /*
2473 * The watermark values may have been used in the past,
2474 * so we must maintain them in the registers for some
2475 * time even if the level is now disabled.
2476 */
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002477 if (!wm->enable)
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002478 ret_wm->enable = false;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002479
2480 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2481 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2482 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2483 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2484 }
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002485}
2486
2487/*
2488 * Merge all low power watermarks for all active pipes.
2489 */
2490static void ilk_wm_merge(struct drm_device *dev,
Ville Syrjälä0ba22e22013-12-05 15:51:34 +02002491 const struct intel_wm_config *config,
Imre Deak820c1982013-12-17 14:46:36 +02002492 const struct ilk_wm_maximums *max,
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002493 struct intel_pipe_wm *merged)
2494{
Paulo Zanoni7733b492015-07-07 15:26:04 -03002495 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002496 int level, max_level = ilk_wm_max_level(dev);
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002497 int last_enabled_level = max_level;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002498
Ville Syrjälä0ba22e22013-12-05 15:51:34 +02002499 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2500 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2501 config->num_pipes_active > 1)
2502 return;
2503
Ville Syrjälä6c8b6c22013-12-05 15:51:35 +02002504 /* ILK: FBC WM must be disabled always */
2505 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002506
2507 /* merge each WM1+ level */
2508 for (level = 1; level <= max_level; level++) {
2509 struct intel_wm_level *wm = &merged->wm[level];
2510
2511 ilk_merge_wm_level(dev, level, wm);
2512
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002513 if (level > last_enabled_level)
2514 wm->enable = false;
2515 else if (!ilk_validate_wm_level(level, max, wm))
2516 /* make sure all following levels get disabled */
2517 last_enabled_level = level - 1;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002518
2519 /*
2520 * The spec says it is preferred to disable
2521 * FBC WMs instead of disabling a WM level.
2522 */
2523 if (wm->fbc_val > max->fbc) {
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002524 if (wm->enable)
2525 merged->fbc_wm_enabled = false;
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002526 wm->fbc_val = 0;
2527 }
2528 }
Ville Syrjälä6c8b6c22013-12-05 15:51:35 +02002529
2530 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2531 /*
2532 * FIXME this is racy. FBC might get enabled later.
2533 * What we should check here is whether FBC can be
2534 * enabled sometime later.
2535 */
Paulo Zanoni7733b492015-07-07 15:26:04 -03002536 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2537 intel_fbc_enabled(dev_priv)) {
Ville Syrjälä6c8b6c22013-12-05 15:51:35 +02002538 for (level = 2; level <= max_level; level++) {
2539 struct intel_wm_level *wm = &merged->wm[level];
2540
2541 wm->enable = false;
2542 }
2543 }
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002544}
2545
Ville Syrjäläb380ca32013-10-09 19:18:01 +03002546static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2547{
2548 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
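	/*
	 * When the platform has a usable WM4 level (wm[4].enable), the
	 * three LP registers hold levels 1, 3 and 4 and level 2 is
	 * skipped; otherwise the mapping is the identity.
	 */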
2549 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2550}
2551
Ville Syrjäläa68d68e2013-12-05 15:51:29 +02002552/* The value we need to program into the WM_LPx latency field */
2553static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2554{
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556
Ville Syrjäläa42a5712014-01-07 16:14:08 +02002557 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjäläa68d68e2013-12-05 15:51:29 +02002558 return 2 * level;
2559 else
2560 return dev_priv->wm.pri_latency[level];
2561}
2562
Imre Deak820c1982013-12-17 14:46:36 +02002563static void ilk_compute_wm_results(struct drm_device *dev,
Ville Syrjälä0362c782013-10-09 19:17:57 +03002564 const struct intel_pipe_wm *merged,
Ville Syrjälä609cede2013-10-09 19:18:03 +03002565 enum intel_ddb_partitioning partitioning,
Imre Deak820c1982013-12-17 14:46:36 +02002566 struct ilk_wm_values *results)
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002567{
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002568 struct intel_crtc *intel_crtc;
2569 int level, wm_lp;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002570
Ville Syrjälä0362c782013-10-09 19:17:57 +03002571 results->enable_fbc_wm = merged->fbc_wm_enabled;
Ville Syrjälä609cede2013-10-09 19:18:03 +03002572 results->partitioning = partitioning;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002573
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002574 /* LP1+ register values */
Paulo Zanonicca32e92013-05-31 11:45:06 -03002575 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
Ville Syrjälä1fd527c2013-08-06 22:24:05 +03002576 const struct intel_wm_level *r;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002577
Ville Syrjäläb380ca32013-10-09 19:18:01 +03002578 level = ilk_wm_lp_to_level(wm_lp, merged);
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002579
Ville Syrjälä0362c782013-10-09 19:17:57 +03002580 r = &merged->wm[level];
Paulo Zanonicca32e92013-05-31 11:45:06 -03002581
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002582 /*
2583 * Maintain the watermark values even if the level is
2584 * disabled. Doing otherwise could cause underruns.
2585 */
2586 results->wm_lp[wm_lp - 1] =
Ville Syrjäläa68d68e2013-12-05 15:51:29 +02002587 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
Ville Syrjälä416f4722013-11-02 21:07:46 -07002588 (r->pri_val << WM1_LP_SR_SHIFT) |
2589 r->cur_val;
2590
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002591 if (r->enable)
2592 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2593
Ville Syrjälä416f4722013-11-02 21:07:46 -07002594 if (INTEL_INFO(dev)->gen >= 8)
2595 results->wm_lp[wm_lp - 1] |=
2596 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2597 else
2598 results->wm_lp[wm_lp - 1] |=
2599 r->fbc_val << WM1_LP_FBC_SHIFT;
2600
Ville Syrjäläd52fea52014-04-28 15:44:57 +03002601 /*
2602 * Always set WM1S_LP_EN when spr_val != 0, even if the
2603 * level is disabled. Doing otherwise could cause underruns.
2604 */
Ville Syrjälä6cef2b8a2013-12-05 15:51:32 +02002605 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2606 WARN_ON(wm_lp != 1);
2607 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2608 } else
2609 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
Paulo Zanonicca32e92013-05-31 11:45:06 -03002610 }
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002611
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002612 /* LP0 register values */
Damien Lespiaud3fcc802014-05-13 23:32:22 +01002613 for_each_intel_crtc(dev, intel_crtc) {
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002614 enum pipe pipe = intel_crtc->pipe;
2615 const struct intel_wm_level *r =
2616 &intel_crtc->wm.active.wm[0];
Paulo Zanoni1011d8c2013-05-09 16:55:50 -03002617
Ville Syrjälä0b2ae6d2013-10-09 19:17:55 +03002618 if (WARN_ON(!r->enable))
2619 continue;
2620
2621 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2622
2623 results->wm_pipe[pipe] =
2624 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2625 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2626 r->cur_val;
Paulo Zanoni801bcff2013-05-31 10:08:35 -03002627 }
2628}
2629
Paulo Zanoni861f3382013-05-31 10:19:21 -03002630/* Find the result with the highest level enabled. If both are at the same
2631 * level, break the tie on enable_fbc_wm; prefer r1 if they are still tied. */
Imre Deak820c1982013-12-17 14:46:36 +02002632static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
Ville Syrjälä198a1e92013-10-09 19:17:58 +03002633 struct intel_pipe_wm *r1,
2634 struct intel_pipe_wm *r2)
Paulo Zanoni861f3382013-05-31 10:19:21 -03002635{
Ville Syrjälä198a1e92013-10-09 19:17:58 +03002636 int level, max_level = ilk_wm_max_level(dev);
2637 int level1 = 0, level2 = 0;
Paulo Zanoni861f3382013-05-31 10:19:21 -03002638
Ville Syrjälä198a1e92013-10-09 19:17:58 +03002639 for (level = 1; level <= max_level; level++) {
2640 if (r1->wm[level].enable)
2641 level1 = level;
2642 if (r2->wm[level].enable)
2643 level2 = level;
Paulo Zanoni861f3382013-05-31 10:19:21 -03002644 }
2645
Ville Syrjälä198a1e92013-10-09 19:17:58 +03002646 if (level1 == level2) {
2647 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
Paulo Zanoni861f3382013-05-31 10:19:21 -03002648 return r2;
2649 else
2650 return r1;
Ville Syrjälä198a1e92013-10-09 19:17:58 +03002651 } else if (level1 > level2) {
Paulo Zanoni861f3382013-05-31 10:19:21 -03002652 return r1;
2653 } else {
2654 return r2;
2655 }
2656}
2657
Ville Syrjälä49a687c2013-10-11 19:39:52 +03002658/* dirty bits used to track which watermarks need changes */
2659#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2660#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2661#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2662#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2663#define WM_DIRTY_FBC (1 << 24)
2664#define WM_DIRTY_DDB (1 << 25)
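/* pipe bits 0-2, linetime bits 8-10, LP1-LP3 bits 16-18, FBC bit 24, DDB bit 25 */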

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
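
/*
 * A worked example of the two loops above: if only the WM2 values differ,
 * the first loop stops at wm_lp == 2 and the second marks WM_DIRTY_LP(2) |
 * WM_DIRTY_LP(3). A lower-numbered LP change thus always drags the higher
 * ones along with it, but never the other way around.
 */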

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need to, because every
 * write causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   struct drm_crtc *for_crtc,
				   const struct intel_wm_config *config,
				   const struct skl_pipe_wm_parameters *params,
				   struct skl_ddb_entry *alloc /* out */)
{
	struct drm_crtc *crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;

	if (!params->active) {
		alloc->start = 0;
		alloc->end = 0;
		return;
	}

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	nth_active_pipe = 0;
	for_each_crtc(dev, crtc) {
		if (!to_intel_crtc(crtc)->active)
			continue;

		if (crtc == for_crtc)
			break;

		nth_active_pipe++;
	}

	pipe_size = ddb_size / config->num_pipes_active;
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
	alloc->end = alloc->start + pipe_size;
}
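
/*
 * A rough sketch of the even split above, assuming a SKL part with two
 * active pipes: ddb_size = 896 - 4 = 892 blocks, pipe_size = 892 / 2 = 446,
 * so the first active pipe gets blocks [0, 446) and the second [446, 892),
 * with the 4 bypass blocks never handed out.
 */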

static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
	if (config->num_pipes_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}
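
/*
 * Example round-trip with skl_ddb_entry_write() further down: an entry
 * covering blocks [0, 446) is written as ((446 - 1) << 16) | 0 =
 * 0x01bd0000 and decodes back here to start = 0, end = 0x1bd + 1 = 446.
 * An all-zero register decodes to an empty entry, since end stays 0.
 */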

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	for_each_pipe(dev_priv, pipe) {
		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
	}
}

static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
{
	/* for planar format */
	if (p->y_bytes_per_pixel) {
		if (y)	/* y-plane data rate */
			return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
		else	/* uv-plane data rate */
			return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
	}

	/* for packed formats */
	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
 * 3 * 4096 * 8192 * 4 < 2^32
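 * (3 * 4096 * 8192 * 4 = 402,653,184, comfortably below 2^32 = 4,294,967,296)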
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
				 const struct skl_pipe_wm_parameters *params)
{
	unsigned int total_data_rate = 0;
	int plane;

	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
		if (p->y_bytes_per_pixel) {
			total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
		}
	}

	return total_data_rate;
}

static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
		      const struct intel_wm_config *config,
		      const struct skl_pipe_wm_parameters *params,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t minimum[I915_MAX_PLANES];
	uint16_t y_minimum[I915_MAX_PLANES];
	unsigned int total_data_rate;
	int plane;

	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
		return;
	}

	cursor_blocks = skl_cursor_allocation(config);
	ddb->cursor[pipe].start = alloc->end - cursor_blocks;
	ddb->cursor[pipe].end = alloc->end;

	alloc_size -= cursor_blocks;
	alloc->end -= cursor_blocks;

	/* 1. Allocate the minimum required blocks for each active plane */
	for_each_plane(dev_priv, pipe, plane) {
		const struct intel_plane_wm_parameters *p;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		minimum[plane] = 8;
		alloc_size -= minimum[plane];
		y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
		alloc_size -= y_minimum[plane];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);

	start = alloc->start;
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
		const struct intel_plane_wm_parameters *p;
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;

		p = &params->plane[plane];
		if (!p->enabled)
			continue;

		data_rate = skl_plane_relative_data_rate(p, 0);

		/*
		 * Allocation for (packed formats) or (the uv-plane part of a
		 * planar format): promote the expression to 64 bits to avoid
		 * overflow; the result is less than the available space,
		 * since data_rate / total_data_rate < 1.
		 */
		plane_blocks = minimum[plane];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		ddb->plane[pipe][plane].start = start;
		ddb->plane[pipe][plane].end = start + plane_blocks;

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		if (p->y_bytes_per_pixel) {
			y_data_rate = skl_plane_relative_data_rate(p, 1);
			y_plane_blocks = y_minimum[plane];
			y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
						  total_data_rate);

			ddb->y_plane[pipe][plane].start = start;
			ddb->y_plane[pipe][plane].end = start + y_plane_blocks;

			start += y_plane_blocks;
		}
	}
}

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (the max the punit can code is 255 and we
 * add 2us for the read latency), and bytes_per_pixel should always be <= 8.
 * That allows a pixel_rate of up to ~2 GHz, which should be sufficient,
 * since the max 2xcdclk is 1350 MHz and the pixel rate should never
 * exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}
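
/*
 * Worked example, assuming the usual kHz/us units: pixel_rate = 148500
 * (1080p@60), bytes_per_pixel = 4 and latency = 5 give
 * wm_intermediate_val = 5 * 148500 * 4 / 512 = 5800, and
 * DIV_ROUND_UP(5800, 1000) = 6 blocks, i.e. the number of 512-byte blocks
 * fetched by the plane during one latency window.
 */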

static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
				plane_blocks_per_line;

	return ret;
}
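
/*
 * Worked example with the same assumed units and a linear framebuffer:
 * pixel_rate = 148500, pipe_htotal = 2200, horiz_pixels = 1920,
 * bytes_per_pixel = 4, latency = 5. plane_bytes_per_line = 7680, so
 * plane_blocks_per_line = 15; DIV_ROUND_UP(5 * 148500, 2200 * 1000) = 1
 * line is in flight during the latency window, for a result of 15 blocks.
 */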

static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
				       const struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	enum pipe pipe = intel_crtc->pipe;

	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
		   sizeof(new_ddb->plane[pipe])))
		return true;

	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
		    sizeof(new_ddb->cursor[pipe])))
		return true;

	return false;
}

static void skl_compute_wm_global_parameters(struct drm_device *dev,
					     struct intel_wm_config *config)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		config->num_pipes_active += to_intel_crtc(crtc)->active;

	/* FIXME: I don't think we need those two global parameters on SKL */
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		config->sprites_enabled |= intel_plane->wm.enabled;
		config->sprites_scaled |= intel_plane->wm.scaled;
	}
}

static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
					   struct skl_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	int i = 1; /* Index for sprite planes start */

	p->active = intel_crtc->active;
	if (p->active) {
		p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
		p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);

		fb = crtc->primary->state->fb;
		/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
		if (fb) {
			p->plane[0].enabled = true;
			p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
				drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
			p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
				drm_format_plane_cpp(fb->pixel_format, 0) : 0;
			p->plane[0].tiling = fb->modifier[0];
		} else {
			p->plane[0].enabled = false;
			p->plane[0].bytes_per_pixel = 0;
			p->plane[0].y_bytes_per_pixel = 0;
			p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
		}
		p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
		p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h;
		p->plane[0].rotation = crtc->primary->state->rotation;

		fb = crtc->cursor->state->fb;
		p->cursor.y_bytes_per_pixel = 0;
		if (fb) {
			p->cursor.enabled = true;
			p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
			p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
			p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
		} else {
			p->cursor.enabled = false;
			p->cursor.bytes_per_pixel = 0;
			p->cursor.horiz_pixels = 64;
			p->cursor.vert_pixels = 64;
		}
	}

	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe &&
		    plane->type == DRM_PLANE_TYPE_OVERLAY)
			p->plane[i++] = intel_plane->wm;
	}
}

static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				 struct skl_pipe_wm_parameters *p,
				 struct intel_plane_wm_parameters *p_params,
				 uint16_t ddb_allocation,
				 int level,
				 uint16_t *out_blocks, /* out */
				 uint8_t *out_lines /* out */)
{
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t bytes_per_pixel;

	if (latency == 0 || !p->active || !p_params->enabled)
		return false;

	bytes_per_pixel = p_params->y_bytes_per_pixel ?
		p_params->y_bytes_per_pixel :
		p_params->bytes_per_pixel;
	method1 = skl_wm_method1(p->pixel_rate,
				 bytes_per_pixel,
				 latency);
	method2 = skl_wm_method2(p->pixel_rate,
				 p->pipe_htotal,
				 p_params->horiz_pixels,
				 bytes_per_pixel,
				 p_params->tiling,
				 latency);

	plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
	    p_params->tiling == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;

		if (intel_rotation_90_or_270(p_params->rotation)) {
			switch (p_params->bytes_per_pixel) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
		    p_params->tiling == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31)
		return false;

	*out_blocks = res_blocks;
	*out_lines = res_lines;

	return true;
}

static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct skl_pipe_wm_parameters *p,
				 enum pipe pipe,
				 int level,
				 int num_planes,
				 struct skl_wm_level *result)
{
	uint16_t ddb_blocks;
	int i;

	for (i = 0; i < num_planes; i++) {
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(dev_priv,
						p, &p->plane[i],
						ddb_blocks,
						level,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}

	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
	result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
						 ddb_blocks, level,
						 &result->cursor_res_b,
						 &result->cursor_res_l);
}

static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
	if (!to_intel_crtc(crtc)->active)
		return 0;

	if (WARN_ON(p->pixel_rate == 0))
		return 0;

	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}
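
/*
 * For example, pipe_htotal = 2200 and pixel_rate = 148500 (kHz) give
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, which appears to be the
 * line time (2200 / 148.5 MHz, roughly 14.8 us) expressed in 1/8 us
 * units and rounded up.
 */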

static void skl_compute_transition_wm(struct drm_crtc *crtc,
				      struct skl_pipe_wm_parameters *params,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int i;

	if (!params->active)
		return;

	/* Until we know more, just disable transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		trans_wm->plane_en[i] = false;
	trans_wm->cursor_en = false;
}

static void skl_compute_pipe_wm(struct drm_crtc *crtc,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm_parameters *params,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
				     level, intel_num_planes(intel_crtc),
				     &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);

	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm_parameters *p,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].cursor_res_b;

		if (p_wm->wm[level].cursor_en)
			temp |= PLANE_WM_EN;

		r->cursor[pipe][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.cursor_res_b;
	if (p_wm->trans_wm.cursor_en)
		temp |= PLANE_WM_EN;

	r->cursor_trans[pipe] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (!new->dirty[pipe])
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->cursor[pipe][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);

		for (i = 0; i < intel_num_planes(crtc); i++) {
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);
			skl_ddb_entry_write(dev_priv,
					    PLANE_NV12_BUF_CFG(pipe, i),
					    &new->ddb.y_plane[pipe][i]);
		}

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.cursor[pipe]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previously enabled pipe (another way to put it is:
 *     pipes with their new allocation strictly included in their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}
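
/*
 * For instance, shrinking a pipe from [0, 892) to [0, 446) passes the
 * check above (the sizes differ and the new range sits entirely inside
 * the old one), so that pipe is flushed in the first pass of
 * skl_flush_wm_values() below. Growing it back from [0, 446) to [0, 892)
 * fails the end-bound check and is deferred to a later pass.
 */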

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained
	 * within the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without those pipes fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes with more space than before
		 * are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_pipe_wm_parameters *params,
			       struct intel_wm_config *config,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	skl_compute_wm_pipe_parameters(crtc, params);
	skl_allocate_pipe_ddb(crtc, config, params, ddb);
	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);

	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.skl_active = *pipe_wm;

	return true;
}

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct intel_wm_config *config,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		struct skl_pipe_wm_parameters params = {};
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&params, config,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}

static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_pipe_wm_parameters params = {};
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_pipe_wm pipe_wm = {};
	struct intel_wm_config config = {};

	memset(results, 0, sizeof(*results));

	skl_compute_wm_global_parameters(dev, &config);

	if (!skl_update_pipe_wm(crtc, &params, &config,
				&results->ddb, &pipe_wm))
		return;

	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, &config, results);
	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *fb = plane->state->fb;

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;

	/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
	intel_plane->wm.bytes_per_pixel =
		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
		drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
	intel_plane->wm.y_bytes_per_pixel =
		(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
		drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;

	/*
	 * Framebuffer can be NULL on plane disable, but it does not
	 * matter for watermarks if we assume no tiling in that case.
	 */
	if (fb)
		intel_plane->wm.tiling = fb->modifier[0];
	intel_plane->wm.rotation = plane->state->rotation;

	skl_update_wm(crtc);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void
ilk_update_sprite_wm(struct drm_plane *plane,
		     struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].cursor_en = is_enabled;
			active->wm[level].cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.cursor_en = is_enabled;
			active->trans_wm.cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}

static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc->active)
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
		}
		temp = hw->cursor[pipe][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->cursor_trans[pipe];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}

#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
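
/*
 * These expand to plain mask-and-shift field extraction; e.g.
 * _FW_WM(tmp, CURSORB) becomes
 * ((tmp & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT), and the _VLV
 * variant merely picks the wider _MASK_VLV flavour of the mask.
 */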
3973
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].primary =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].cursor =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].sprite[1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV

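/*
 * Aside (not in the original source): the per-plane watermark fields in
 * DSPFW1..9 hold only the low 8 bits of each value (9 bits for SR); the
 * extra high bit lives in DSPHOWM, which is why the readout above ORs in
 * _FW_WM(tmp, ..._HI) << 8 (or << 9 for SR). E.g. a primary watermark of
 * 0x17f would be read as 0x7f from DSPFW1 plus 1 from DSPHOWM's PLANEA_HI
 * bit.
 */
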
void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_plane *plane;
	enum pipe pipe;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	for_each_intel_plane(dev, plane) {
		switch (plane->base.type) {
		int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			plane->wm.fifo_size = 63;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
			break;
		}
	}

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
			wm->level = VLV_WM_LEVEL_DDR_DVFS;

		mutex_unlock(&dev_priv->rps.hw_lock);
	}

	for_each_pipe(dev_priv, pipe)
		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

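/*
 * Worked example of the SR formula above (illustrative numbers, not from
 * the original source): with a 100 MHz dotclock, an htotal of 2000 clocks
 * and a 1920-pixel-wide, 4-byte-per-pixel plane, line time = 2000 / 100 MHz
 * = 20 us. For an assumed 30 us latency, watermark = (trunc(30/20) + 1) *
 * 1920 * 4 = 2 * 7680 = 15360 bytes, which is then rounded up and padded
 * by 2 entries before being programmed.
 */
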
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width,
				    uint32_t sprite_height,
				    int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc,
						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
}

/**
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing an RMW cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at the moment).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (IS_GEN9(dev_priv->dev)) {
		limits = (dev_priv->rps.max_freq_softlimit) << 23;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
	} else {
		limits = dev_priv->rps.max_freq_softlimit << 24;
		if (val <= dev_priv->rps.min_freq_softlimit)
			limits |= dev_priv->rps.min_freq_softlimit << 16;
	}

	return limits;
}

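/*
 * Illustration of the limits encoding above (example values, not from the
 * original source): on pre-gen9, with max_freq_softlimit = 0x20 and
 * min_freq_softlimit = 0x0b, a request at the floor yields
 * limits = (0x20 << 24) | (0x0b << 16) = 0x200b0000; gen9 packs the same
 * two fields at bit offsets 23 and 14 instead.
 */
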
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;
	dev_priv->rps.last_adj = 0;
}

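/*
 * Worked example for the threshold programming above (not from the
 * original source): in LOW_POWER, ei_up = 16000 us and threshold_up = 95,
 * so GEN6_RP_UP_THRESHOLD is programmed with 16000 * 95 / 100 = 15200 us
 * worth of interval units, i.e. the GPU upclocks once it has been busy
 * for at least 15.2 ms of a 16 ms evaluation window.
 */
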
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_freq_softlimit and max_freq_softlimit) is
 * modified so that we can update the GEN6_RP_INTERRUPT_LIMITS register
 * accordingly. */
static void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_GEN9(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}

static void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq);
	WARN_ON(val < dev_priv->rps.min_freq);

	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->rps.cur_freq) {
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (!IS_CHERRYVIEW(dev_priv))
			gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down.
 *
 * If Gfx is idle, then:
 * 1. Force-wake the media well.
 * 2. Request the idle frequency.
 * 3. Release force-wake of the media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	u32 val = dev_priv->rps.idle_freq;

	if (dev_priv->rps.cur_freq <= val)
		return;

	/* Wake up the media well, as that takes a lot less
	 * power than the render well. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	valleyview_set_rps(dev_priv->dev, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}

void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
		dev_priv->rps.last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	spin_lock(&dev_priv->rps.client_lock);
	while (!list_empty(&dev_priv->rps.clients))
		list_del_init(dev_priv->rps.clients.next);
	spin_unlock(&dev_priv->rps.client_lock);
}

void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct intel_rps_client *rps,
		    unsigned long submitted)
{
	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!(dev_priv->mm.busy &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
		return;

	/* Force a RPS boost (and don't count it against the client) if
	 * the GPU is severely congested.
	 */
	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
		rps = NULL;

	spin_lock(&dev_priv->rps.client_lock);
	if (rps == NULL || list_empty(&rps->link)) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.client_boost = true;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		if (rps != NULL) {
			list_add(&rps->link, &dev_priv->rps.clients);
			rps->boosts++;
		} else
			dev_priv->rps.boosts++;
	}
	spin_unlock(&dev_priv->rps.client_lock);
}

void intel_set_rps(struct drm_device *dev, u8 val)
{
	if (IS_VALLEYVIEW(dev))
		valleyview_set_rps(dev, val);
	else
		gen6_set_rps(dev, val);
}

static void gen9_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
}

static void cherryview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We're doing forcewake before disabling RC6,
	 * which is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	if (HAS_RC6p(dev))
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
	else
		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}

static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake and code is gone for ilk. */
	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (HAS_RC6p(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				      enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}

int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}

static void gen6_init_rps_frequencies(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t rp_state_cap;
	u32 ddcc_status = 0;
	int ret;

	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev)) {
		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}

	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
		ret = sandybridge_pcode_read(dev_priv,
					     HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					     &ddcc_status);
		if (0 == ret)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					dev_priv->rps.min_freq,
					dev_priv->rps.max_freq);
	}

	if (IS_SKYLAKE(dev)) {
		/* Store the frequency values in 16.66 MHz units, which is
		 * the natural hardware unit for SKL */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			dev_priv->rps.min_freq_softlimit =
				max_t(int, dev_priv->rps.efficient_freq,
				      intel_freq_opcode(dev_priv, 450));
		else
			dev_priv->rps.min_freq_softlimit =
				dev_priv->rps.min_freq;
	}
}

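/*
 * Unit check for the above (illustrative, not from the original source):
 * on pre-SKL an rp0_freq of 20 in 50 MHz units means 20 * 50 = 1000 MHz.
 * Multiplying by GEN9_FREQ_SCALER re-expresses the same clock in SKL's
 * 16.66 MHz units, since 50 MHz / 16.66 MHz = 3 hardware ticks per unit.
 */
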
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	gen6_init_rps_frequencies(dev);

	/* Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		   GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen9_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* 2c: Program Coarse Power Gating Policies. */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

	/* 3a: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
		 "on" : "off");
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
		   GEN6_RC_CTL_EI_MODE(1) |
		   rc6_mask);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
	 */
	I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
		   GEN9_MEDIA_PG_ENABLE : 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t rc6_mask = 0;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN7_RC_CTL_TO_MODE |
			   rc6_mask);
	else
		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
			   GEN6_RC_CTL_EI_MODE(1) |
			   rc6_mask);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later) */

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

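/*
 * Arithmetic note for the timeouts above (not from the original source):
 * these registers count in 1.28 us units, so 100000000 / 128 = 781250
 * corresponds to 781250 * 1.28 us = 1.0 s, and 7600000 / 128 = 59375
 * units is 76 ms, i.e. 90% of the 84.48 ms up-evaluation interval.
 */
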
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Initialize rps frequencies */
	gen6_init_rps_frequencies(dev);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

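/*
 * Decode example for the overclock mailbox above (hypothetical value, not
 * from the original source): bit 31 of pcu_mbox signals OC support and
 * the low byte is the overclock ceiling in 50 MHz units, so
 * pcu_mbox = 0x8000001e would report an overclock max of 0x1e * 50 =
 * 1500 MHz.
 */
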
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	if (IS_SKYLAKE(dev)) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
	} else {
		min_gpu_freq = dev_priv->rps.min_freq;
		max_gpu_freq = dev_priv->rps.max_freq;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_SKYLAKE(dev)) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}

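/*
 * Worked example for the Haswell branch above (illustrative, not from the
 * original source): a gpu_freq of 20 in 50 MHz units is a 1000 MHz GT
 * clock; mult_frac(20, 5, 4) = 25 requests a ring frequency 25% above it,
 * subject to the min_ring_freq floor derived from DDR bandwidth.
 */
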
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_CORE_RING_FREQ(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp0;

	if (dev->pdev->revision >= 0x20) {
		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

		switch (INTEL_INFO(dev)->eu_total) {
		case 8:
			/* (2 * 4) config */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
			break;
		case 12:
			/* (2 * 6) config */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
			break;
		case 16:
			/* (2 * 8) config */
		default:
			/* Setting (2 * 8) Min RP0 for any other combination */
			rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
			break;
		}
		rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
	} else {
		/* For pre-production hardware */
		val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
		rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
			PUNIT_GPU_STATUS_MAX_FREQ_MASK;
	}
	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val, rp1;

	if (dev->pdev->revision >= 0x20) {
		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
		rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
	} else {
		/* For pre-production hardware */
		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
		       PUNIT_GPU_STATUS_MAX_FREQ_MASK);
	}
	return rp1;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

Ville Syrjälä03af2042014-06-28 02:03:53 +03005202static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
Jesse Barnes0a073b82013-04-17 15:54:58 -07005203{
5204 u32 val, rp0;
5205
Jani Nikula64936252013-05-22 15:36:20 +03005206 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005207
5208 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5209 /* Clamp to max */
5210 rp0 = min_t(u32, rp0, 0xea);
5211
5212 return rp0;
5213}
5214
5215static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5216{
5217 u32 val, rpe;
5218
Jani Nikula64936252013-05-22 15:36:20 +03005219 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005220 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
Jani Nikula64936252013-05-22 15:36:20 +03005221 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
Jesse Barnes0a073b82013-04-17 15:54:58 -07005222 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5223
5224 return rpe;
5225}
5226
Ville Syrjälä03af2042014-06-28 02:03:53 +03005227static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
Jesse Barnes0a073b82013-04-17 15:54:58 -07005228{
Jani Nikula64936252013-05-22 15:36:20 +03005229 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
Jesse Barnes0a073b82013-04-17 15:54:58 -07005230}
5231
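/*
 * The GPU power context (pctx) for VLV/CHV lives in stolen memory, with
 * its 4KiB-aligned physical address programmed into VLV_PCBR. The
 * helpers below allocate that buffer when the BIOS hasn't, and sanity
 * check the address the hardware is actually using.
 */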
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pctx_paddr, paddr;
	struct i915_gtt *gtt = &dev_priv->gtt;
	u32 pcbr;
	int pctx_size = 32*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = (dev_priv->mm.stolen_base +
			 (gtt->stolen_size - pctx_size));

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}

static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}

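/*
 * The two init_gt_powersave() flavours below cache the DDR speed and the
 * RP0/RPe/RP1/min operating points from Punit fuses under rps.hw_lock,
 * preserving any min/max softlimits a previous init may have set. On
 * Cherryview the Punit only validates [RPe, RP0], so RPe doubles as the
 * minimum frequency.
 */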
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = 200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = 267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = 333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = 320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = 400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}

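/*
 * cherryview_enable_rps - enable RC6 and RPS on Cherryview
 *
 * Follows the numbered enable sequence in the code below: grab forcewake,
 * program RC6 thresholds and the promotion timeout, enable the residency
 * counters, then configure and enable RPS with a 50/50 CPU/SoC power
 * bias. RC6 is only turned on when the module parameter allows it and a
 * valid PCBR has been programmed.
 */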
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us (0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

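/*
 * valleyview_enable_rps - enable RC6 and RPS on Valleyview
 *
 * Same overall flow as the Cherryview path above, with VLV-specific
 * values: media turbo mode, RC6 with parallel context restore, and what
 * the macro name suggests is a 12.5/87.5 CPU/SoC power bias.
 */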
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

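/*
 * Decode a PXVFREQ-style register value into a frequency:
 * freq = (div * 133333) / ((1 << post) * pre). The 133333 base suggests
 * units of kHz derived from a 133.33 MHz reference, e.g. div = 16,
 * post = 0, pre = 1 gives 2133328 (~2.13 GHz); a zero pre-divider
 * means no frequency is programmed and decodes to 0.
 */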
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

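/*
 * Coefficients for the Ironlake chipset power model: each entry is
 * matched against dev_priv->ips.c_m (i) and dev_priv->ips.r_t (t) to
 * pick the slope (m) and intercept (c) used by __i915_chipset_val()
 * below.
 */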
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

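/*
 * Decode a PXVID voltage code for the IPS power model: codes 8..30 are
 * clamped to 31, then mapped linearly as (pxvid + 2) * 125; on mobile
 * parts a fixed 1125 is subtracted afterwards (clamped at zero). Only
 * the relative scale matters to the power math in __i915_gfx_val().
 */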
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}

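/*
 * __i915_update_gfx_val - integrate the GFXEC energy counter
 *
 * Samples the GFXEC counter, handles 32-bit wraparound, and converts the
 * delta into an average power figure via the magic 1181 scale factor
 * (stored in ips.gfx_power, consumed by __i915_gfx_val() below). Caller
 * must hold mchdev_lock.
 */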
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

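/*
 * intel_init_emon - program the energy monitor (EMON) on Ironlake
 *
 * Loads the per-event energy weights and per-P-state weights (derived
 * from the fused voltage/frequency table, scaled as vid^2 * freq) into
 * the PMON block, then enables it and caches the LCFUSE correction
 * factor for the software power model above.
 */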
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

static void gen6_suspend_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	gen6_disable_rps_interrupts(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (INTEL_INFO(dev)->gen >= 9)
			gen9_disable_rps(dev);
		else if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);

		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

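/*
 * RPS/RC6 bring-up is deferred to this work item so that slow PCU
 * communication happens off the driver load and resume fast paths; the
 * matching intel_runtime_pm_put() below releases the reference taken
 * when the work was scheduled.
 */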
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	gen6_reset_rps_interrupts(dev);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_enable_rc6(dev);
		gen9_enable_rps(dev);
		if (IS_SKYLAKE(dev))
			__gen6_update_ring_freq(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;

	gen6_enable_rps_interrupts(dev);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev))
		return;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	gen6_suspend_rps(dev);
	dev_priv->rps.enabled = false;
}

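/*
 * From here on down it's per-platform init-time clock gating setup:
 * mostly documented workarounds (the Wa* tags below) plus watermark
 * and trickle feed tweaks for each display generation.
 */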
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

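/*
 * ironlake_init_clock_gating accumulates its ILK_DSPCLK_GATE_D bits in
 * dspclk_gate and writes them in one go, after programming the memory
 * self-refresh and (on mobile) FBC chicken bits that the inline
 * comments document.
 */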
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
6494 I915_WRITE(GEN6_GT_MODE,
Damien Lespiau98533252014-12-08 17:33:51 +00006495 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
Ville Syrjälä8d85d272014-02-04 21:59:15 +02006496
	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	uint32_t misccpctl;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:bdw
	 * WaTempDisableDOPClkGating:bdw
	 */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	lpt_init_clock_gating(dev);
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_init_display_clock_gating(dev_priv);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.init_clock_gating)
		dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		if (IS_BROXTON(dev))
			dev_priv->display.init_clock_gating =
				bxt_init_clock_gating;
		else if (IS_SKYLAKE(dev))
			dev_priv->display.init_clock_gating =
				skl_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disabling CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);

		dev_priv->display.update_wm = vlv_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

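/*
 * Usage sketch (illustrative only, not a new code path): pcode callers
 * hold rps.hw_lock and pass a mailbox opcode plus an in/out data word.
 * Reading the minimum frequency table, with an opcode that is assumed
 * to be defined in i915_reg.h, would look roughly like:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	ret = sandybridge_pcode_read(dev_priv,
 *				     GEN6_PCODE_READ_MIN_FREQ_TABLE, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Note that the read variant writes *val into GEN6_PCODE_DATA first,
 * since some read opcodes take an input parameter in the data register.
 */
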
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

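/*
 * Worked example (an aside, derived from the table above): a 333 MHz
 * CZ clock selects a divider of 16, so frequencies move in roughly
 * czclk/div = 20 MHz steps; the other table entries keep that step
 * size near 20 MHz as well (200/10, 320/16 and 400/20 are exactly 20).
 */
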
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

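/*
 * Worked example (a sketch of the arithmetic, assuming a DDR1333 system
 * so that czclk = 333 MHz and the divider is 16): opcode 0xc7 (199)
 * maps to 333 * (199 + 6 - 0xbd) / 16 = 333 MHz, and
 * byt_freq_opcode(dev_priv, 333) inverts that back to 0xc7.
 */
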
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	/*
	 * Check the error return before halving, so an unknown czclk
	 * propagates as -1 instead of truncating to 0 and dividing by
	 * zero below.
	 */
	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;
	div /= 2;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = dev_priv->rps.cz_freq;

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;
	mul /= 2;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv->dev))
		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
	else if (IS_CHERRYVIEW(dev_priv->dev))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		return byt_freq_opcode(dev_priv, val);
	else
		return val / GT_FREQUENCY_MULTIPLIER;
}

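/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * are inverses up to the rounding of the platform scale factor.
 * Assuming the usual definitions (GT_FREQUENCY_MULTIPLIER = 50,
 * GEN9_FREQ_SCALER = 3), opcode 22 corresponds to 1100 MHz on SNB..BDW,
 * while on gen9 the effective unit is roughly 16.67 MHz. A hypothetical
 * sanity check, which holds whenever the requested frequency sits
 * exactly on an opcode step, could read:
 *
 *	static inline bool intel_freq_roundtrips(struct drm_i915_private *i915,
 *						 int freq_mhz)
 *	{
 *		return intel_gpu_freq(i915,
 *				      intel_freq_opcode(i915, freq_mhz)) ==
 *			freq_mhz;
 *	}
 */
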
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req, true))
		gen6_rps_boost(to_i915(req->ring->dev), NULL,
			       req->emitted_jiffies);

	i915_gem_request_unreference__unlocked(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);
}

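/*
 * Design note (an observation on the code above, not new behaviour):
 * the boost may be requested from contexts that cannot sleep, hence
 * the GFP_ATOMIC allocation and the deferral to a worker that is free
 * to take the RPS locks; the reference taken on the request keeps it
 * alive until the worker has either boosted or skipped it, and a
 * failed allocation simply drops the boost, which is harmless since
 * it is only a performance hint.
 */
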
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
}