blob: 1caba548a525cddda4d0a8168173245319401349 [file] [log] [blame]
Daniel Vetter9c065a72014-09-30 10:56:38 +02001/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
Daniel Vetter9c065a72014-09-30 10:56:38 +020034
Daniel Vettere4e76842014-09-30 10:56:42 +020035/**
36 * DOC: runtime pm
37 *
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side where
40 * software is supposed to control many power gates manually on recent hardware,
41 * since on the GT side a lot of the power management is done by the hardware.
42 * But even there some manual control at the device level is required.
43 *
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
50 */
51
Suketu Shah5aefb232015-04-16 14:22:10 +053052bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
Imre Deak438b8dc2017-07-11 23:42:30 +030053 enum i915_power_well_id power_well_id);
Suketu Shah5aefb232015-04-16 14:22:10 +053054
Imre Deak9c8d0b82016-06-13 16:44:34 +030055static struct i915_power_well *
Imre Deak438b8dc2017-07-11 23:42:30 +030056lookup_power_well(struct drm_i915_private *dev_priv,
57 enum i915_power_well_id power_well_id);
Imre Deak9c8d0b82016-06-13 16:44:34 +030058
Daniel Stone9895ad02015-11-20 15:55:33 +000059const char *
60intel_display_power_domain_str(enum intel_display_power_domain domain)
61{
62 switch (domain) {
63 case POWER_DOMAIN_PIPE_A:
64 return "PIPE_A";
65 case POWER_DOMAIN_PIPE_B:
66 return "PIPE_B";
67 case POWER_DOMAIN_PIPE_C:
68 return "PIPE_C";
69 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
70 return "PIPE_A_PANEL_FITTER";
71 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
72 return "PIPE_B_PANEL_FITTER";
73 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
74 return "PIPE_C_PANEL_FITTER";
75 case POWER_DOMAIN_TRANSCODER_A:
76 return "TRANSCODER_A";
77 case POWER_DOMAIN_TRANSCODER_B:
78 return "TRANSCODER_B";
79 case POWER_DOMAIN_TRANSCODER_C:
80 return "TRANSCODER_C";
81 case POWER_DOMAIN_TRANSCODER_EDP:
82 return "TRANSCODER_EDP";
Jani Nikula4d1de972016-03-18 17:05:42 +020083 case POWER_DOMAIN_TRANSCODER_DSI_A:
84 return "TRANSCODER_DSI_A";
85 case POWER_DOMAIN_TRANSCODER_DSI_C:
86 return "TRANSCODER_DSI_C";
Daniel Stone9895ad02015-11-20 15:55:33 +000087 case POWER_DOMAIN_PORT_DDI_A_LANES:
88 return "PORT_DDI_A_LANES";
89 case POWER_DOMAIN_PORT_DDI_B_LANES:
90 return "PORT_DDI_B_LANES";
91 case POWER_DOMAIN_PORT_DDI_C_LANES:
92 return "PORT_DDI_C_LANES";
93 case POWER_DOMAIN_PORT_DDI_D_LANES:
94 return "PORT_DDI_D_LANES";
95 case POWER_DOMAIN_PORT_DDI_E_LANES:
96 return "PORT_DDI_E_LANES";
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020097 case POWER_DOMAIN_PORT_DDI_A_IO:
98 return "PORT_DDI_A_IO";
99 case POWER_DOMAIN_PORT_DDI_B_IO:
100 return "PORT_DDI_B_IO";
101 case POWER_DOMAIN_PORT_DDI_C_IO:
102 return "PORT_DDI_C_IO";
103 case POWER_DOMAIN_PORT_DDI_D_IO:
104 return "PORT_DDI_D_IO";
105 case POWER_DOMAIN_PORT_DDI_E_IO:
106 return "PORT_DDI_E_IO";
Daniel Stone9895ad02015-11-20 15:55:33 +0000107 case POWER_DOMAIN_PORT_DSI:
108 return "PORT_DSI";
109 case POWER_DOMAIN_PORT_CRT:
110 return "PORT_CRT";
111 case POWER_DOMAIN_PORT_OTHER:
112 return "PORT_OTHER";
113 case POWER_DOMAIN_VGA:
114 return "VGA";
115 case POWER_DOMAIN_AUDIO:
116 return "AUDIO";
117 case POWER_DOMAIN_PLLS:
118 return "PLLS";
119 case POWER_DOMAIN_AUX_A:
120 return "AUX_A";
121 case POWER_DOMAIN_AUX_B:
122 return "AUX_B";
123 case POWER_DOMAIN_AUX_C:
124 return "AUX_C";
125 case POWER_DOMAIN_AUX_D:
126 return "AUX_D";
127 case POWER_DOMAIN_GMBUS:
128 return "GMBUS";
129 case POWER_DOMAIN_INIT:
130 return "INIT";
131 case POWER_DOMAIN_MODESET:
132 return "MODESET";
133 default:
134 MISSING_CASE(domain);
135 return "?";
136 }
137}
138
Damien Lespiaue8ca9322015-07-30 18:20:26 -0300139static void intel_power_well_enable(struct drm_i915_private *dev_priv,
140 struct i915_power_well *power_well)
141{
142 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
143 power_well->ops->enable(dev_priv, power_well);
144 power_well->hw_enabled = true;
145}
146
Damien Lespiaudcddab32015-07-30 18:20:27 -0300147static void intel_power_well_disable(struct drm_i915_private *dev_priv,
148 struct i915_power_well *power_well)
149{
150 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
151 power_well->hw_enabled = false;
152 power_well->ops->disable(dev_priv, power_well);
153}
154
Imre Deakb409ca92016-06-13 16:44:33 +0300155static void intel_power_well_get(struct drm_i915_private *dev_priv,
156 struct i915_power_well *power_well)
157{
158 if (!power_well->count++)
159 intel_power_well_enable(dev_priv, power_well);
160}
161
162static void intel_power_well_put(struct drm_i915_private *dev_priv,
163 struct i915_power_well *power_well)
164{
165 WARN(!power_well->count, "Use count on power well %s is already zero",
166 power_well->name);
167
168 if (!--power_well->count)
169 intel_power_well_disable(dev_priv, power_well);
170}
171
Daniel Vettere4e76842014-09-30 10:56:42 +0200172/*
Daniel Vetter9c065a72014-09-30 10:56:38 +0200173 * We should only use the power well if we explicitly asked the hardware to
174 * enable it, so check if it's enabled and also check if we've requested it to
175 * be enabled.
176 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/*
	 * Both the driver's request bit and the hardware state bit must be
	 * set; either one alone means the well is transitioning or was
	 * requested by somebody else (BIOS/KVMR/DEBUG request registers).
	 */
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
	       (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
183
Daniel Vettere4e76842014-09-30 10:56:42 +0200184/**
185 * __intel_display_power_is_enabled - unlocked check for a power domain
186 * @dev_priv: i915 device instance
187 * @domain: power domain to check
188 *
189 * This is the unlocked version of intel_display_power_is_enabled() and should
190 * only be used from error capture and recovery code where deadlocks are
191 * possible.
192 *
193 * Returns:
194 * True when the power domain is enabled, false otherwise.
195 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200196bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
197 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +0200198{
Daniel Vetter9c065a72014-09-30 10:56:38 +0200199 struct i915_power_well *power_well;
200 bool is_enabled;
Daniel Vetter9c065a72014-09-30 10:56:38 +0200201
202 if (dev_priv->pm.suspended)
203 return false;
204
Daniel Vetter9c065a72014-09-30 10:56:38 +0200205 is_enabled = true;
206
Imre Deak75ccb2e2017-02-17 17:39:43 +0200207 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +0200208 if (power_well->always_on)
209 continue;
210
211 if (!power_well->hw_enabled) {
212 is_enabled = false;
213 break;
214 }
215 }
216
217 return is_enabled;
218}
219
Daniel Vettere4e76842014-09-30 10:56:42 +0200220/**
Damien Lespiauf61ccae2014-11-25 13:45:41 +0000221 * intel_display_power_is_enabled - check for a power domain
Daniel Vettere4e76842014-09-30 10:56:42 +0200222 * @dev_priv: i915 device instance
223 * @domain: power domain to check
224 *
225 * This function can be used to check the hw power domain state. It is mostly
226 * used in hardware state readout functions. Everywhere else code should rely
227 * upon explicit power domain reference counting to ensure that the hardware
228 * block is powered up before accessing it.
229 *
230 * Callers must hold the relevant modesetting locks to ensure that concurrent
231 * threads can't disable the power well while the caller tries to read a few
232 * registers.
233 *
234 * Returns:
235 * True when the power domain is enabled, false otherwise.
236 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200237bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
238 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +0200239{
240 struct i915_power_domains *power_domains;
241 bool ret;
242
243 power_domains = &dev_priv->power_domains;
244
245 mutex_lock(&power_domains->lock);
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200246 ret = __intel_display_power_is_enabled(dev_priv, domain);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200247 mutex_unlock(&power_domains->lock);
248
249 return ret;
250}
251
Daniel Vettere4e76842014-09-30 10:56:42 +0200252/**
253 * intel_display_set_init_power - set the initial power domain state
254 * @dev_priv: i915 device instance
255 * @enable: whether to enable or disable the initial power domain state
256 *
257 * For simplicity our driver load/unload and system suspend/resume code assumes
258 * that all power domains are always enabled. This functions controls the state
259 * of this little hack. While the initial power domain state is enabled runtime
260 * pm is effectively disabled.
261 */
Daniel Vetterd9bc89d92014-09-30 10:56:40 +0200262void intel_display_set_init_power(struct drm_i915_private *dev_priv,
263 bool enable)
264{
265 if (dev_priv->power_domains.init_power_on == enable)
266 return;
267
268 if (enable)
269 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
270 else
271 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
272
273 dev_priv->power_domains.init_power_on = enable;
274}
275
Daniel Vetter9c065a72014-09-30 10:56:38 +0200276/*
277 * Starting with Haswell, we have a "Power Down Well" that can be turned off
278 * when not needed anymore. We have 4 registers that can request the power well
279 * to be enabled, and it will only be disabled if none of the registers is
280 * requesting it to be enabled.
281 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure here that we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);

	/* Re-arm the interrupts for the pipes that live in this power well. */
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
304
Ville Syrjäläaae8ba82016-02-19 20:47:30 +0200305static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
306{
307 if (IS_BROADWELL(dev_priv))
308 gen8_irq_power_well_pre_disable(dev_priv,
309 1 << PIPE_C | 1 << PIPE_B);
310}
311
Damien Lespiaud14c0342015-03-06 18:50:51 +0000312static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
313 struct i915_power_well *power_well)
314{
David Weinehall52a05c32016-08-22 13:32:44 +0300315 struct pci_dev *pdev = dev_priv->drm.pdev;
Damien Lespiaud14c0342015-03-06 18:50:51 +0000316
317 /*
318 * After we re-enable the power well, if we touch VGA register 0x3d5
319 * we'll get unclaimed register interrupts. This stops after we write
320 * anything to the VGA MSR register. The vgacon module uses this
321 * register all the time, so if we unbind our driver and, as a
322 * consequence, bind vgacon, we'll get stuck in an infinite loop at
323 * console_unlock(). So make here we touch the VGA MSR register, making
324 * sure vgacon can keep working normally without triggering interrupts
325 * and error messages.
326 */
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300327 if (power_well->id == SKL_DISP_PW_2) {
David Weinehall52a05c32016-08-22 13:32:44 +0300328 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
Damien Lespiaud14c0342015-03-06 18:50:51 +0000329 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
David Weinehall52a05c32016-08-22 13:32:44 +0300330 vga_put(pdev, VGA_RSRC_LEGACY_IO);
Damien Lespiaud14c0342015-03-06 18:50:51 +0000331
332 gen8_irq_power_well_post_enable(dev_priv,
333 1 << PIPE_C | 1 << PIPE_B);
334 }
Damien Lespiaud14c0342015-03-06 18:50:51 +0000335}
336
Ville Syrjäläaae8ba82016-02-19 20:47:30 +0200337static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
338 struct i915_power_well *power_well)
339{
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300340 if (power_well->id == SKL_DISP_PW_2)
Ville Syrjäläaae8ba82016-02-19 20:47:30 +0200341 gen8_irq_power_well_pre_disable(dev_priv,
342 1 << PIPE_C | 1 << PIPE_B);
343}
344
Imre Deak42d93662017-06-29 18:37:01 +0300345static void gen9_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
346 struct i915_power_well *power_well)
347{
Imre Deak438b8dc2017-07-11 23:42:30 +0300348 enum i915_power_well_id id = power_well->id;
Imre Deak42d93662017-06-29 18:37:01 +0300349
350 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
351 WARN_ON(intel_wait_for_register(dev_priv,
352 HSW_PWR_WELL_DRIVER,
353 SKL_POWER_WELL_STATE(id),
354 SKL_POWER_WELL_STATE(id),
355 1));
356}
357
Imre Deak438b8dc2017-07-11 23:42:30 +0300358static u32 gen9_power_well_requesters(struct drm_i915_private *dev_priv,
359 enum i915_power_well_id id)
Imre Deak42d93662017-06-29 18:37:01 +0300360{
361 u32 req_mask = SKL_POWER_WELL_REQ(id);
362 u32 ret;
363
364 ret = I915_READ(HSW_PWR_WELL_BIOS) & req_mask ? 1 : 0;
365 ret |= I915_READ(HSW_PWR_WELL_DRIVER) & req_mask ? 2 : 0;
366 ret |= I915_READ(HSW_PWR_WELL_KVMR) & req_mask ? 4 : 0;
367 ret |= I915_READ(HSW_PWR_WELL_DEBUG) & req_mask ? 8 : 0;
368
369 return ret;
370}
371
static void gen9_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/*
	 * NOTE: the condition has side effects on purpose - it latches the
	 * state-bit reading into 'disabled' and the requester mask into
	 * 'reqs' on every poll iteration (1ms timeout).
	 */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_DRIVER) &
			       SKL_POWER_WELL_STATE(id))) ||
		 (reqs = gen9_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
398
Imre Deakec46d482017-07-06 17:40:33 +0300399static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
400 struct i915_power_well *power_well)
Daniel Vetter9c065a72014-09-30 10:56:38 +0200401{
Imre Deakec46d482017-07-06 17:40:33 +0300402 I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE_REQUEST);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200403
Imre Deakec46d482017-07-06 17:40:33 +0300404 if (intel_wait_for_register(dev_priv,
405 HSW_PWR_WELL_DRIVER,
406 HSW_PWR_WELL_STATE_ENABLED,
407 HSW_PWR_WELL_STATE_ENABLED,
408 20))
409 DRM_ERROR("Timeout enabling power well\n");
410 hsw_power_well_post_enable(dev_priv);
411}
Daniel Vetter9c065a72014-09-30 10:56:38 +0200412
Imre Deakec46d482017-07-06 17:40:33 +0300413static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
414 struct i915_power_well *power_well)
415{
416 hsw_power_well_pre_disable(dev_priv);
417 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
418 POSTING_READ(HSW_PWR_WELL_DRIVER);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200419}
420
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000421#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200422 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
423 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
424 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
425 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
426 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
427 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
428 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
429 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
430 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
431 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
432 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
433 BIT_ULL(POWER_DOMAIN_AUX_B) | \
434 BIT_ULL(POWER_DOMAIN_AUX_C) | \
435 BIT_ULL(POWER_DOMAIN_AUX_D) | \
436 BIT_ULL(POWER_DOMAIN_AUDIO) | \
437 BIT_ULL(POWER_DOMAIN_VGA) | \
438 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +0200439#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
440 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
441 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200442 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +0200443#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
444 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200445 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +0200446#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
447 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200448 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +0200449#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
450 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200451 BIT_ULL(POWER_DOMAIN_INIT))
Patrik Jakobsson9f836f92015-11-16 16:20:01 +0100452#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
453 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200454 BIT_ULL(POWER_DOMAIN_MODESET) | \
455 BIT_ULL(POWER_DOMAIN_AUX_A) | \
456 BIT_ULL(POWER_DOMAIN_INIT))
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000457
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +0530458#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200459 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
460 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
461 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
462 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
463 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
464 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
465 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
466 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
467 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
468 BIT_ULL(POWER_DOMAIN_AUX_B) | \
469 BIT_ULL(POWER_DOMAIN_AUX_C) | \
470 BIT_ULL(POWER_DOMAIN_AUDIO) | \
471 BIT_ULL(POWER_DOMAIN_VGA) | \
472 BIT_ULL(POWER_DOMAIN_GMBUS) | \
473 BIT_ULL(POWER_DOMAIN_INIT))
Patrik Jakobsson9f836f92015-11-16 16:20:01 +0100474#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
475 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200476 BIT_ULL(POWER_DOMAIN_MODESET) | \
477 BIT_ULL(POWER_DOMAIN_AUX_A) | \
478 BIT_ULL(POWER_DOMAIN_INIT))
Imre Deak9c8d0b82016-06-13 16:44:34 +0300479#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200480 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
481 BIT_ULL(POWER_DOMAIN_AUX_A) | \
482 BIT_ULL(POWER_DOMAIN_INIT))
Imre Deak9c8d0b82016-06-13 16:44:34 +0300483#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200484 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
485 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
486 BIT_ULL(POWER_DOMAIN_AUX_B) | \
487 BIT_ULL(POWER_DOMAIN_AUX_C) | \
488 BIT_ULL(POWER_DOMAIN_INIT))
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +0530489
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200490#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200491 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
492 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
493 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
494 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
495 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
496 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
497 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
498 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
499 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
500 BIT_ULL(POWER_DOMAIN_AUX_B) | \
501 BIT_ULL(POWER_DOMAIN_AUX_C) | \
502 BIT_ULL(POWER_DOMAIN_AUDIO) | \
503 BIT_ULL(POWER_DOMAIN_VGA) | \
504 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +0200505#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
506 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
507#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
508 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
509#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
510 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +0200511#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200512 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
513 BIT_ULL(POWER_DOMAIN_AUX_A) | \
514 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +0200515#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200516 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
517 BIT_ULL(POWER_DOMAIN_AUX_B) | \
518 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +0200519#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200520 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
521 BIT_ULL(POWER_DOMAIN_AUX_C) | \
522 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200523#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200524 BIT_ULL(POWER_DOMAIN_AUX_A) | \
525 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200526#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200527 BIT_ULL(POWER_DOMAIN_AUX_B) | \
528 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200529#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200530 BIT_ULL(POWER_DOMAIN_AUX_C) | \
531 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200532#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
533 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +0200534 BIT_ULL(POWER_DOMAIN_MODESET) | \
535 BIT_ULL(POWER_DOMAIN_AUX_A) | \
536 BIT_ULL(POWER_DOMAIN_INIT))
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200537
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -0700538#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
539 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
540 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
541 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
542 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
543 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
544 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
545 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
546 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
547 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
548 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
549 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
550 BIT_ULL(POWER_DOMAIN_AUX_B) | \
551 BIT_ULL(POWER_DOMAIN_AUX_C) | \
552 BIT_ULL(POWER_DOMAIN_AUX_D) | \
553 BIT_ULL(POWER_DOMAIN_AUDIO) | \
554 BIT_ULL(POWER_DOMAIN_VGA) | \
555 BIT_ULL(POWER_DOMAIN_INIT))
556#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
557 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
558 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
559 BIT_ULL(POWER_DOMAIN_INIT))
560#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
561 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
562 BIT_ULL(POWER_DOMAIN_INIT))
563#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
564 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
565 BIT_ULL(POWER_DOMAIN_INIT))
566#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
567 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
568 BIT_ULL(POWER_DOMAIN_INIT))
569#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
570 BIT_ULL(POWER_DOMAIN_AUX_A) | \
571 BIT_ULL(POWER_DOMAIN_INIT))
572#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
573 BIT_ULL(POWER_DOMAIN_AUX_B) | \
574 BIT_ULL(POWER_DOMAIN_INIT))
575#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
576 BIT_ULL(POWER_DOMAIN_AUX_C) | \
577 BIT_ULL(POWER_DOMAIN_INIT))
578#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
579 BIT_ULL(POWER_DOMAIN_AUX_D) | \
580 BIT_ULL(POWER_DOMAIN_INIT))
581#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
582 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
583 BIT_ULL(POWER_DOMAIN_MODESET) | \
584 BIT_ULL(POWER_DOMAIN_AUX_A) | \
585 BIT_ULL(POWER_DOMAIN_INIT))
586
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530587static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
588{
Imre Deakbfcdabe2016-04-01 16:02:37 +0300589 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
590 "DC9 already programmed to be enabled.\n");
591 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
592 "DC5 still not disabled to enable DC9.\n");
Imre Deake8a3a2a2017-06-29 18:37:00 +0300593 WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER) &
594 SKL_POWER_WELL_REQ(SKL_DISP_PW_2),
595 "Power well 2 on.\n");
Imre Deakbfcdabe2016-04-01 16:02:37 +0300596 WARN_ONCE(intel_irqs_enabled(dev_priv),
597 "Interrupts not disabled yet.\n");
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530598
599 /*
600 * TODO: check for the following to verify the conditions to enter DC9
601 * state are satisfied:
602 * 1] Check relevant display engine registers to verify if mode set
603 * disable sequence was followed.
604 * 2] Check if display uninitialize sequence is initialized.
605 */
606}
607
/*
 * Sanity-check the preconditions for exiting DC9: interrupts still
 * disabled and DC5 not enabled. Only warns - does not block the exit.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
623
Mika Kuoppala779cb5d2016-02-18 17:58:09 +0200624static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
625 u32 state)
626{
627 int rewrites = 0;
628 int rereads = 0;
629 u32 v;
630
631 I915_WRITE(DC_STATE_EN, state);
632
633 /* It has been observed that disabling the dc6 state sometimes
634 * doesn't stick and dmc keeps returning old value. Make sure
635 * the write really sticks enough times and also force rewrite until
636 * we are confident that state is exactly what we want.
637 */
638 do {
639 v = I915_READ(DC_STATE_EN);
640
641 if (v != state) {
642 I915_WRITE(DC_STATE_EN, state);
643 rewrites++;
644 rereads = 0;
645 } else if (rereads++ > 5) {
646 break;
647 }
648
649 } while (rewrites < 100);
650
651 if (v != state)
652 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
653 state, v);
654
655 /* Most of the times we need one retry, avoid spam */
656 if (rewrites > 1)
657 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
658 state, rewrites);
659}
660
Imre Deakda2f41d2016-04-20 20:27:56 +0300661static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530662{
Imre Deakda2f41d2016-04-20 20:27:56 +0300663 u32 mask;
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530664
Imre Deak13ae3a02015-11-04 19:24:16 +0200665 mask = DC_STATE_EN_UPTO_DC5;
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +0200666 if (IS_GEN9_LP(dev_priv))
Imre Deak13ae3a02015-11-04 19:24:16 +0200667 mask |= DC_STATE_EN_DC9;
668 else
669 mask |= DC_STATE_EN_UPTO_DC6;
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530670
Imre Deakda2f41d2016-04-20 20:27:56 +0300671 return mask;
672}
673
/*
 * Resynchronize the software DC-state tracking (csr.dc_state) with what
 * the hardware currently reports, masked to this platform's valid bits.
 * Used after events that may have changed the state behind our back.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
684
/*
 * Program a new DC state into DC_STATE_EN, clamped to the states allowed
 * on this platform, and update the software tracking. Warns if the caller
 * requests a disallowed state or if the DMC firmware diverged from our
 * last programmed value.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	/* Replace only the platform-valid bits, preserving the rest. */
	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
710
Imre Deak13ae3a02015-11-04 19:24:16 +0200711void bxt_enable_dc9(struct drm_i915_private *dev_priv)
712{
713 assert_can_enable_dc9(dev_priv);
714
715 DRM_DEBUG_KMS("Enabling DC9\n");
716
Imre Deak78597992016-06-16 16:37:20 +0300717 intel_power_sequencer_reset(dev_priv);
Imre Deak13ae3a02015-11-04 19:24:16 +0200718 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
719}
720
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530721void bxt_disable_dc9(struct drm_i915_private *dev_priv)
722{
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530723 assert_can_disable_dc9(dev_priv);
724
725 DRM_DEBUG_KMS("Disabling DC9\n");
726
Imre Deak13ae3a02015-11-04 19:24:16 +0200727 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
Imre Deak8090ba82016-08-10 14:07:33 +0300728
729 intel_pps_unlock_regs_wa(dev_priv);
A.Sunil Kamath664326f2014-11-24 13:37:44 +0530730}
731
/*
 * Warn (once) if the CSR/DMC firmware does not appear to be loaded:
 * the program storage, SSP base and HTP registers must all be non-zero.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
739
/*
 * Verify the preconditions for entering DC5: power well 2 must be
 * disabled, DC5 must not already be enabled, an RPM wakelock must be
 * held and the CSR firmware must be loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
753
/* Enter the DC5 state after checking all its preconditions hold. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
762
/*
 * Verify the preconditions for entering DC6: the utility pin (backlight)
 * must be disabled, DC6 must not already be enabled and the CSR
 * firmware must be loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
772
/* Enter the DC6 state after checking all its preconditions hold. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}
782
/* Leave the DC6 state by disabling all DC states. */
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
789
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000790static void skl_set_power_well(struct drm_i915_private *dev_priv,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200791 struct i915_power_well *power_well, bool enable)
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000792{
793 uint32_t tmp, fuse_status;
794 uint32_t req_mask, state_mask;
Imre Deak91377262017-07-06 17:40:31 +0300795 bool check_fuse_status = false;
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000796
797 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
798 fuse_status = I915_READ(SKL_FUSE_STATUS);
799
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300800 switch (power_well->id) {
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000801 case SKL_DISP_PW_1:
Chris Wilson117c1142016-06-30 15:33:33 +0100802 if (intel_wait_for_register(dev_priv,
803 SKL_FUSE_STATUS,
804 SKL_FUSE_PG0_DIST_STATUS,
805 SKL_FUSE_PG0_DIST_STATUS,
806 1)) {
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000807 DRM_ERROR("PG0 not enabled\n");
808 return;
809 }
810 break;
811 case SKL_DISP_PW_2:
812 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
813 DRM_ERROR("PG1 in disabled state\n");
814 return;
815 }
816 break;
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +0200817 case SKL_DISP_PW_MISC_IO:
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -0700818 case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000819 case SKL_DISP_PW_DDI_B:
820 case SKL_DISP_PW_DDI_C:
821 case SKL_DISP_PW_DDI_D:
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -0700822 case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
823 case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
824 case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
825 case CNL_DISP_PW_AUX_D:
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000826 break;
827 default:
Imre Deak438b8dc2017-07-11 23:42:30 +0300828 WARN(1, "Unknown power well %u\n", power_well->id);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000829 return;
830 }
831
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300832 req_mask = SKL_POWER_WELL_REQ(power_well->id);
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300833 state_mask = SKL_POWER_WELL_STATE(power_well->id);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000834
Imre Deak91377262017-07-06 17:40:31 +0300835 if (!enable)
Ville Syrjäläaae8ba82016-02-19 20:47:30 +0200836 skl_power_well_pre_disable(dev_priv, power_well);
837
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000838 if (enable) {
Imre Deak91377262017-07-06 17:40:31 +0300839 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000840
Imre Deak91377262017-07-06 17:40:31 +0300841 DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
842 check_fuse_status = true;
Imre Deak42d93662017-06-29 18:37:01 +0300843
844 gen9_wait_for_power_well_enable(dev_priv, power_well);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000845 } else {
Imre Deak91377262017-07-06 17:40:31 +0300846 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
847 POSTING_READ(HSW_PWR_WELL_DRIVER);
848 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
Imre Deakc6782b72016-04-05 13:26:05 +0300849
Imre Deak42d93662017-06-29 18:37:01 +0300850 gen9_wait_for_power_well_disable(dev_priv, power_well);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000851 }
852
853 if (check_fuse_status) {
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300854 if (power_well->id == SKL_DISP_PW_1) {
Chris Wilson8b00f552016-06-30 15:33:34 +0100855 if (intel_wait_for_register(dev_priv,
856 SKL_FUSE_STATUS,
857 SKL_FUSE_PG1_DIST_STATUS,
858 SKL_FUSE_PG1_DIST_STATUS,
859 1))
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000860 DRM_ERROR("PG1 distributing status timeout\n");
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +0300861 } else if (power_well->id == SKL_DISP_PW_2) {
Chris Wilson8b00f552016-06-30 15:33:34 +0100862 if (intel_wait_for_register(dev_priv,
863 SKL_FUSE_STATUS,
864 SKL_FUSE_PG2_DIST_STATUS,
865 SKL_FUSE_PG2_DIST_STATUS,
866 1))
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000867 DRM_ERROR("PG2 distributing status timeout\n");
868 }
869 }
Damien Lespiaud14c0342015-03-06 18:50:51 +0000870
Imre Deak91377262017-07-06 17:40:31 +0300871 if (enable)
Damien Lespiaud14c0342015-03-06 18:50:51 +0000872 skl_power_well_post_enable(dev_priv, power_well);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000873}
874
/*
 * Take over the HSW power well request state from the BIOS: if the BIOS
 * request bit is set, mirror it into the driver register (if not set
 * already) and then clear the BIOS register so that further control is
 * done through the driver register only.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Take over the request bit if set by BIOS. */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
		if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
		      HSW_PWR_WELL_ENABLE_REQUEST))
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
	}
}
887
/*
 * A SKL power well counts as enabled only when both its request bit and
 * its state bit are set in HSW_PWR_WELL_DRIVER.
 */
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
			SKL_POWER_WELL_STATE(power_well->id);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
896
/*
 * Take over a SKL power well request from the BIOS: mirror the BIOS
 * request bit for this well into the driver register (if not already
 * set), then clear the BIOS bit.
 */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
	uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
	}
}
912
/* Power-well ops hook: enable a SKL power well. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
918
/* Power-well ops hook: disable a SKL power well. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
924
/* Power-well ops hook: init the DDI PHY backing this BXT common lane well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
930
/* Power-well ops hook: uninit the DDI PHY backing this BXT common lane well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
936
/* Power-well ops hook: report whether the well's DDI PHY is enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
942
/*
 * Verify the programmed state of every DDI PHY whose common lane power
 * well is currently in use (reference count > 0). GLK has a third
 * common lane well/PHY (GLK_DPIO_CMN_C) to check.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
	}
}
961
/* The "DC off" well is enabled when neither DC5 nor DC6 is enabled. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
967
/*
 * Warn if the DBuf power is not fully up: both the request and the
 * state bit of DBUF_CTL must be set.
 */
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
}
976
/*
 * Enable the "DC off" well: disable all DC states and sanity-check the
 * state that should have been retained/restored across DC5/DC6 —
 * cdclk configuration, DBuf power and (on BXT/GLK) the DDI PHY state.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Verify the cached cdclk state still matches the hardware. */
	WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
992
/*
 * Disable the "DC off" well: re-enter the deepest DC state the platform
 * allows (DC6 preferred over DC5). A no-op without DMC firmware, since
 * DC states depend on it.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
1004
/* No-op sync_hw hook for power wells with no HW state to take over. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1009
/* No-op enable/disable hook for the always-on power well. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1014
/* The always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
1020
/*
 * On i830 the "power well" consists of both display pipes: enable each
 * pipe that is not already enabled.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
1029
/* Disable both i830 pipes (B first, mirroring the enable order). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1036
/* The i830 pipes power well is enabled only when both pipes are enabled. */
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}
1043
/*
 * Bring the hardware in sync with the software reference count: enable
 * the pipes if the well is held, disable them otherwise.
 */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
1052
/*
 * Set a VLV/CHV power well on or off through the Punit: program
 * PUNIT_REG_PWRGT_CTRL and poll PUNIT_REG_PWRGT_STATUS until the
 * requested state is reached (100ms timeout). Runs under the rps
 * hardware lock, which serializes Punit sideband access.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	/* Already in the requested state? Nothing to do. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1088
/* Power-well ops hook: enable a VLV power well via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1094
/* Power-well ops hook: disable a VLV power well via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1100
/*
 * Query a VLV/CHV power well state from the Punit. Warns if the status
 * is neither fully on nor fully gated, or if the control register
 * disagrees with the status (some other agent poking the controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1136
/*
 * Program the VLV/CHV display clock gating and related workarounds:
 * DSPCLK_GATE_D, trickle feed / pnd deadline setup and the raw clock
 * frequency register.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been read out before this point. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1163
/*
 * Display-side init after the VLV/CHV display power well comes up:
 * enable the CRI/reference clocks on all pipes, reprogram clock gating,
 * re-enable display IRQs, and — unless this is initial driver
 * init/resume (which re-inits that state explicitly) — restore hotplug,
 * ADPA, VGA and the PPS register unlock workaround.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1212
/*
 * Display-side teardown before the VLV/CHV display power well goes
 * down: disable and drain display IRQs, reset the PPS tracking, and
 * fall back to hotplug polling (except during late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1228
/* Enable the VLV display power well, then run the display-side init. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1238
/* Run the display-side teardown, then disable the VLV display power well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1248
/*
 * Enable the VLV DPIO common lane power well and de-assert cmn_reset,
 * following the sequence from the VBIOS notes document referenced below.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *  a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *  b. The other bits such as sfr settings / modesel may all
	 *     be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1272
/*
 * Assert cmn_reset and power down the VLV DPIO common lane well. All
 * pipe PLLs must already be disabled at this point.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1288
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001289#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
Ville Syrjälä30142272015-07-08 23:46:01 +03001290
Imre Deak438b8dc2017-07-11 23:42:30 +03001291static struct i915_power_well *
1292lookup_power_well(struct drm_i915_private *dev_priv,
1293 enum i915_power_well_id power_well_id)
Ville Syrjälä30142272015-07-08 23:46:01 +03001294{
1295 struct i915_power_domains *power_domains = &dev_priv->power_domains;
Ville Syrjälä30142272015-07-08 23:46:01 +03001296 int i;
1297
Imre Deakfc17f222015-11-04 19:24:11 +02001298 for (i = 0; i < power_domains->power_well_count; i++) {
1299 struct i915_power_well *power_well;
1300
1301 power_well = &power_domains->power_wells[i];
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001302 if (power_well->id == power_well_id)
Ville Syrjälä30142272015-07-08 23:46:01 +03001303 return power_well;
1304 }
1305
1306 return NULL;
1307}
1308
1309#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1310
/*
 * Cross-check DISPLAY_PHY_STATUS against the PHY state expected from
 * the cached chv_phy_control value and the state of the two common lane
 * power wells. The expected status is reconstructed bit by bit
 * (powergood, per-channel CL1/CL2 common LDOs, per-spline LDOs) and
 * then polled for, since the PHY can take a while to settle.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			  phy_status, dev_priv->chv_phy_control);
}
1416
1417#undef BITS_SET
1418
/*
 * Power up a CHV DPIO common lane power well (BC or D) and bring the
 * corresponding PHY out of reset.
 *
 * Sequence: power the well on via the Punit, wait for the PHY power-good
 * status, enable dynamic power down of the common lanes over sideband,
 * then deassert the common lane reset in DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	/* Only the two DPIO common wells may be driven through this path. */
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * PHY0 (ports B/C) is accessed through the pipe A IOSF sideband
	 * port, PHY1 (port D) through the pipe C port.
	 */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		/* PHY0 also has a second channel (CH1, port C). */
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* Release the common lane reset; cached and written back as a whole. */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1482
/*
 * Power down a CHV DPIO common lane power well and put the PHY back
 * into reset.
 *
 * The PLLs feeding the PHY must already be disabled (asserted below);
 * the reset is asserted before the well itself is turned off.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	/* Only the two DPIO common wells may be driven through this path. */
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		/* PHY0 serves pipes A/B; their PLLs must be off already. */
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	/* Assert the common lane reset before cutting power. */
	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1513
/*
 * Verify that the DPIO lane power down status of PHY @phy channel @ch
 * matches what the requested override/lane @mask implies.
 *
 * Reads the channel's common lane register over sideband and WARNs if
 * the ALLDL/ANYDL power down bits disagree with the expectation derived
 * from @override and @mask. Purely a consistency check; no HW state is
 * modified beyond the sideband read.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some but not all lanes overridden: only "any" should be set. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the ALLDL/ANYDL bits for the channel being checked. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1575
Ville Syrjäläb0b33842015-07-08 23:45:55 +03001576bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1577 enum dpio_channel ch, bool override)
1578{
1579 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1580 bool was_override;
1581
1582 mutex_lock(&power_domains->lock);
1583
1584 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1585
1586 if (override == was_override)
1587 goto out;
1588
1589 if (override)
1590 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1591 else
1592 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1593
1594 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1595
1596 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1597 phy, ch, dev_priv->chv_phy_control);
1598
Ville Syrjälä30142272015-07-08 23:46:01 +03001599 assert_chv_phy_status(dev_priv);
1600
Ville Syrjäläb0b33842015-07-08 23:45:55 +03001601out:
1602 mutex_unlock(&power_domains->lock);
1603
1604 return was_override;
1605}
1606
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001607void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1608 bool override, unsigned int mask)
1609{
1610 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1611 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1612 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1613 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1614
1615 mutex_lock(&power_domains->lock);
1616
1617 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1618 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1619
1620 if (override)
1621 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1622 else
1623 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1624
1625 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1626
1627 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1628 phy, ch, mask, dev_priv->chv_phy_control);
1629
Ville Syrjälä30142272015-07-08 23:46:01 +03001630 assert_chv_phy_status(dev_priv);
1631
Ville Syrjälä6669e392015-07-08 23:46:00 +03001632 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1633
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001634 mutex_unlock(&power_domains->lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001635}
1636
/*
 * Query the CHV pipe A power well state from the Punit.
 *
 * Returns true if the well reads back as powered on. Also sanity-checks
 * that only states this driver ever requests (power-on / power-gate) are
 * seen, and that the control field agrees with the status field, i.e. no
 * transition is in flight and no other agent is driving the well.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1665
/*
 * Request the CHV pipe A power well on or off through the Punit and wait
 * for the status field to reflect the new state (up to 100ms).
 *
 * The write is skipped entirely if the well is already in the requested
 * state. rps.hw_lock serializes all Punit accesses.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the Punit status field reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	/* Program the request into the control field. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1699
/*
 * Enable the CHV pipe A power well and re-initialize the display state
 * (hw/sw state sync, interrupts, etc.) that lives inside it.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	/* Power must be up before the display side can be initialized. */
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1709
/*
 * Tear down the display state depending on the CHV pipe A power well,
 * then gate the well itself (reverse order of the enable path).
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1719
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain's use count. Caller must hold power_domains->lock and already
 * hold a runtime PM reference.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	/* Walk in enable order so parent wells come up before dependents. */
	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1732
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Also takes a runtime PM reference, released again by
 * intel_display_power_put().
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
1758
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the
 * domain (and the device) is already powered up; it never powers anything
 * on itself. On success the reference must be released with a symmetric
 * call to intel_display_power_put().
 *
 * Returns: true if the reference (including a runtime PM reference) was
 * obtained, false otherwise.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	/* Bail out unless the device is already runtime-active. */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	/* Drop the speculative runtime PM reference if the domain was off. */
	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
1796
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put calls before the count underflows. */
	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	/* Walk in reverse (disable) order: dependents go down before parents. */
	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);

	mutex_unlock(&power_domains->lock);

	/* Release the runtime PM reference taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
1828
/* Domains covered by the single Haswell "display" power well. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Broadwell "display" well; like HSW but pipe A's panel fitter is always on. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001861
/* Domains covered by the Valleyview display (disp2d) power well. */
#define VLV_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Domains needing the VLV DPIO common lane (BC) well. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per-TX-lane-pair DPIO wells; port B pairs depend on AUX B, port C on AUX C. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001907
/* Domains covered by the Cherryview display (pipe A) power well. */
#define CHV_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Domains needing the CHV DPIO common lane well for ports B/C (PHY0). */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Domains needing the CHV DPIO common lane well for port D (PHY1). */
#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Domains covered by the i830 "pipes" power well (both pipes toggle together). */
#define I830_PIPES_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
1950
/* Ops for the fake "always-on" well: nothing to do, always enabled. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe A well, controlled via the Punit. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common lane wells (PHY reset handling included). */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1971
/* Platforms with no controllable wells expose just the always-on one. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
1981
/* Ops for the i830 well that gates both pipes as a unit. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* i830 power well list: always-on plus the combined pipes well. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
2004
/* Ops for the HSW/BDW global display well (HSW_PWR_WELL_* registers). */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for gen9+ (SKL-style) power wells. */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

/* Ops for the virtual gen9 "DC off" well (controls DC5/DC6 entry). */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT DPIO common lane wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2032
/* Haswell power well list: always-on plus the single global display well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
	},
};
2048
/* Broadwell power well list: same layout as HSW, different domain mask. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
	},
};
2064
/* Ops for the VLV display (disp2d) well, with display init/deinit hooks. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well (PHY reset handling included). */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for plain Punit-controlled VLV wells (TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2085
/*
 * Valleyview power well list. Note all four TX lane wells claim all four
 * TX domain masks: the lanes are toggled together in practice.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2143
/* Cherryview power well list. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2176
Suketu Shah5aefb232015-04-16 14:22:10 +05302177bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
Imre Deak438b8dc2017-07-11 23:42:30 +03002178 enum i915_power_well_id power_well_id)
Suketu Shah5aefb232015-04-16 14:22:10 +05302179{
2180 struct i915_power_well *power_well;
2181 bool ret;
2182
2183 power_well = lookup_power_well(dev_priv, power_well_id);
2184 ret = power_well->ops->is_enabled(dev_priv, power_well);
2185
2186 return ret;
2187}
2188
/*
 * Power wells for Skylake (gen9 big core). Enable order is array order,
 * disable order is the reverse (see intel_power_domains_init()).
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2248
/*
 * Power wells for Broxton. Enable order is array order, disable order is
 * the reverse (see intel_power_domains_init()).
 */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* empty domain mask, like SKL/GLK PW1 (presumably DMC managed) */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.bxt.phy = DPIO_PHY1,
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.bxt.phy = DPIO_PHY0,
	},
};
2290
/*
 * Power wells for Geminilake. Enable order is array order, disable order
 * is the reverse (see intel_power_domains_init()).
 */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		.bxt.phy = DPIO_PHY1,
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		.bxt.phy = DPIO_PHY0,
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		.bxt.phy = DPIO_PHY2,
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2376
/*
 * Power wells for Cannonlake. Enable order is array order, disable order
 * is the reverse (see intel_power_domains_init()).
 */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_1,
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2453
/*
 * Normalize the i915.disable_power_well module option: a negative value
 * ("auto") defaults to 1, any explicit non-negative value is collapsed
 * to a strict 0/1 boolean.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0)
		return 1;

	return disable_power_well != 0;
}
2463
/*
 * Compute the mask of DC states the driver is allowed to enable, based
 * on the platform's maximum supported DC state and the i915.enable_dc
 * module option. Out-of-range option values are clamped to the platform
 * maximum with a diagnostic message.
 */
static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
				    int enable_dc)
{
	uint32_t mask;
	int requested_dc;
	int max_dc;

	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* No DC states when power well support is disabled via the option. */
	if (!i915.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		/* -1 means "auto": allow everything the platform supports. */
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 2) {
		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
			      enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	/* DC6 implies DC5; build the mask cumulatively. */
	if (requested_dc > 1)
		mask |= DC_STATE_EN_UPTO_DC6;
	if (requested_dc > 0)
		mask |= DC_STATE_EN_UPTO_DC5;

	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);

	return mask;
}
2512
Imre Deak21792c62017-07-11 23:42:33 +03002513static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2514{
2515 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2516 u64 power_well_ids;
2517 int i;
2518
2519 power_well_ids = 0;
2520 for (i = 0; i < power_domains->power_well_count; i++) {
2521 enum i915_power_well_id id = power_domains->power_wells[i].id;
2522
2523 WARN_ON(id >= sizeof(power_well_ids) * 8);
2524 WARN_ON(power_well_ids & BIT_ULL(id));
2525 power_well_ids |= BIT_ULL(id);
2526 }
2527}
2528
/*
 * Point the power domain bookkeeping at a platform's statically defined
 * power well array, recording the array length alongside. Must be passed
 * an actual array (not a pointer) so ARRAY_SIZE works.
 */
#define set_power_wells(power_domains, __power_wells) ({ \
	(power_domains)->power_wells = (__power_wells); \
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
2533
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Sanitize module options before anything consumes them. */
	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
								     i915.disable_power_well);
	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
							    i915.enable_dc);

	/* Domain masks are tracked in 64-bit quantities. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		set_power_wells(power_domains, cnl_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEMINILAKE(dev_priv)) {
		set_power_wells(power_domains, glk_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		set_power_wells(power_domains, i830_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	/* Catch duplicate ids in the table selected above. */
	assert_power_well_ids_unique(dev_priv);

	return 0;
}
2584
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
}
2619
/*
 * Synchronize software state with hardware for every power well: invoke
 * each well's sync_hw() hook and cache the result of its is_enabled()
 * query in ->hw_enabled, all under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
2633
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002634static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2635{
2636 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2637 POSTING_READ(DBUF_CTL);
2638
2639 udelay(10);
2640
2641 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2642 DRM_ERROR("DBuf power enable timeout\n");
2643}
2644
2645static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2646{
2647 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2648 POSTING_READ(DBUF_CTL);
2649
2650 udelay(10);
2651
2652 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2653 DRM_ERROR("DBuf power disable timeout!\n");
2654}
2655
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, power up PG1 and the MISC IO well, then initialize CDCLK and
 * the DBUF unit. On resume, reload the DMC firmware program if available.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2687
/*
 * Tear down the SKL display core: disable DC states, the DBUF unit and
 * CDCLK, then drop the driver's request on power well 1.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
2717
/*
 * Bring up the BXT display core: disable DC states, clear the PCH reset
 * handshake (there is no PCH on BXT), power up PG1, then initialize CDCLK
 * and the DBUF unit. On resume, reload the DMC firmware program.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2752
/*
 * Tear down the BXT display core: disable DC states, the DBUF unit and
 * CDCLK, then drop the driver's request on power well 1.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
2780
/*
 * Map the process/voltage info bits of CNL_PORT_COMP_DW3 to a compact
 * index into the procmon reference table below.
 */
#define CNL_PROCMON_IDX(val) \
	(((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
#define NUM_CNL_PROCMON \
	(CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)

/*
 * Reference values for the port compensation registers
 * (CNL_PORT_COMP_DW1/9/10), one entry per process/voltage combination.
 * Unlisted combinations are zero-initialized; cnl_display_core_init()
 * WARNs when the selected entry has dw10 == 0.
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[NUM_CNL_PROCMON] = {
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
		{ .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
		{ .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
2800
/*
 * Bring up the CNL display core following the numbered sequence below:
 * disable DC states, enable the PCH reset handshake, power up and program
 * the port compensation (procmon) block, enable PG1, CDCLK and the DBUF
 * unit. Note: @resume is not used here, unlike the SKL/BXT variants.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	const struct cnl_procmon *procmon;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Pick the procmon reference values for this process/voltage combo. */
	val = I915_READ(CNL_PORT_COMP_DW3);
	procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];

	/* dw10 == 0 means the table has no entry for this combination. */
	WARN_ON(procmon->dw10 == 0);

	val = I915_READ(CNL_PORT_COMP_DW1);
	val &= ~((0xff << 16) | 0xff);
	val |= procmon->dw1;
	I915_WRITE(CNL_PORT_COMP_DW1, val);

	I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
	I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);
}
2857
2858#undef CNL_PROCMON_IDX
2859#undef NUM_CNL_PROCMON
2860
/*
 * Tear down the CNL display core in the reverse of cnl_display_core_init():
 * disable DC states, the DBUF unit and CDCLK, drop the driver's request on
 * PG1, then power down the port compensation block.
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
2894
Ville Syrjälä70722462015-04-10 18:21:28 +03002895static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2896{
2897 struct i915_power_well *cmn_bc =
2898 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2899 struct i915_power_well *cmn_d =
2900 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
2901
2902 /*
2903 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
2904 * workaround never ever read DISPLAY_PHY_CONTROL, and
2905 * instead maintain a shadow copy ourselves. Use the actual
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002906 * power well state and lane status to reconstruct the
2907 * expected initial value.
Ville Syrjälä70722462015-04-10 18:21:28 +03002908 */
2909 dev_priv->chv_phy_control =
Ville Syrjäläbc284542015-05-26 20:22:38 +03002910 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
2911 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002912 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
2913 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
2914 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
2915
2916 /*
2917 * If all lanes are disabled we leave the override disabled
2918 * with all power down bits cleared to match the state we
2919 * would use after disabling the port. Otherwise enable the
2920 * override and set the lane powerdown bits accding to the
2921 * current lane status.
2922 */
2923 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
2924 uint32_t status = I915_READ(DPLL(PIPE_A));
2925 unsigned int mask;
2926
2927 mask = status & DPLL_PORTB_READY_MASK;
2928 if (mask == 0xf)
2929 mask = 0x0;
2930 else
2931 dev_priv->chv_phy_control |=
2932 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
2933
2934 dev_priv->chv_phy_control |=
2935 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
2936
2937 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
2938 if (mask == 0xf)
2939 mask = 0x0;
2940 else
2941 dev_priv->chv_phy_control |=
2942 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
2943
2944 dev_priv->chv_phy_control |=
2945 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
2946
Ville Syrjälä70722462015-04-10 18:21:28 +03002947 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
Ville Syrjälä3be60de2015-09-08 18:05:45 +03002948
2949 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
2950 } else {
2951 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002952 }
2953
2954 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
2955 uint32_t status = I915_READ(DPIO_PHY_STATUS);
2956 unsigned int mask;
2957
2958 mask = status & DPLL_PORTD_READY_MASK;
2959
2960 if (mask == 0xf)
2961 mask = 0x0;
2962 else
2963 dev_priv->chv_phy_control |=
2964 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
2965
2966 dev_priv->chv_phy_control |=
2967 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
2968
Ville Syrjälä70722462015-04-10 18:21:28 +03002969 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
Ville Syrjälä3be60de2015-09-08 18:05:45 +03002970
2971 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
2972 } else {
2973 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002974 }
2975
2976 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
2977
2978 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
2979 dev_priv->chv_phy_control);
Ville Syrjälä70722462015-04-10 18:21:28 +03002980}
2981
Daniel Vetter9c065a72014-09-30 10:56:38 +02002982static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2983{
2984 struct i915_power_well *cmn =
2985 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2986 struct i915_power_well *disp2d =
2987 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
2988
Daniel Vetter9c065a72014-09-30 10:56:38 +02002989 /* If the display might be already active skip this */
Ville Syrjälä5d93a6e2014-10-16 20:52:33 +03002990 if (cmn->ops->is_enabled(dev_priv, cmn) &&
2991 disp2d->ops->is_enabled(dev_priv, disp2d) &&
Daniel Vetter9c065a72014-09-30 10:56:38 +02002992 I915_READ(DPIO_CTL) & DPIO_CMNRST)
2993 return;
2994
2995 DRM_DEBUG_KMS("toggling display PHY side reset\n");
2996
2997 /* cmnlane needs DPLL registers */
2998 disp2d->ops->enable(dev_priv, disp2d);
2999
3000 /*
3001 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3002 * Need to assert and de-assert PHY SB reset by gating the
3003 * common lane power, then un-gating it.
3004 * Simply ungating isn't enough to reset the PHY enough to get
3005 * ports and lanes running.
3006 */
3007 cmn->ops->disable(dev_priv, cmn);
3008}
3009
Daniel Vettere4e76842014-09-30 10:56:42 +02003010/**
3011 * intel_power_domains_init_hw - initialize hardware power domain state
3012 * @dev_priv: i915 device instance
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003013 * @resume: Called from resume code paths or not
Daniel Vettere4e76842014-09-30 10:56:42 +02003014 *
3015 * This function initializes the hardware power domain state and enables all
Imre Deak8d8c3862017-02-17 17:39:46 +02003016 * power wells belonging to the INIT power domain. Power wells in other
3017 * domains (and not in the INIT domain) are referenced or disabled during the
3018 * modeset state HW readout. After that the reference count of each power well
3019 * must match its HW enabled state, see intel_power_domains_verify_state().
Daniel Vettere4e76842014-09-30 10:56:42 +02003020 */
Imre Deak73dfc222015-11-17 17:33:53 +02003021void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
Daniel Vetter9c065a72014-09-30 10:56:38 +02003022{
Daniel Vetter9c065a72014-09-30 10:56:38 +02003023 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3024
3025 power_domains->initializing = true;
3026
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07003027 if (IS_CANNONLAKE(dev_priv)) {
3028 cnl_display_core_init(dev_priv, resume);
3029 } else if (IS_GEN9_BC(dev_priv)) {
Imre Deak73dfc222015-11-17 17:33:53 +02003030 skl_display_core_init(dev_priv, resume);
Ander Conselvan de Oliveirab817c442016-12-02 10:23:56 +02003031 } else if (IS_GEN9_LP(dev_priv)) {
Imre Deakd7d7c9e2016-04-01 16:02:42 +03003032 bxt_display_core_init(dev_priv, resume);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01003033 } else if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä770effb2015-07-08 23:45:51 +03003034 mutex_lock(&power_domains->lock);
Ville Syrjälä70722462015-04-10 18:21:28 +03003035 chv_phy_control_init(dev_priv);
Ville Syrjälä770effb2015-07-08 23:45:51 +03003036 mutex_unlock(&power_domains->lock);
Tvrtko Ursulin11a914c2016-10-13 11:03:08 +01003037 } else if (IS_VALLEYVIEW(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02003038 mutex_lock(&power_domains->lock);
3039 vlv_cmnlane_wa(dev_priv);
3040 mutex_unlock(&power_domains->lock);
3041 }
3042
3043 /* For now, we need the power well to be always enabled. */
3044 intel_display_set_init_power(dev_priv, true);
Imre Deakd314cd42015-11-17 17:44:23 +02003045 /* Disable power support if the user asked so. */
3046 if (!i915.disable_power_well)
3047 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Imre Deak30eade12015-11-04 19:24:13 +02003048 intel_power_domains_sync_hw(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003049 power_domains->initializing = false;
3050}
3051
Daniel Vettere4e76842014-09-30 10:56:42 +02003052/**
Imre Deak73dfc222015-11-17 17:33:53 +02003053 * intel_power_domains_suspend - suspend power domain state
3054 * @dev_priv: i915 device instance
3055 *
3056 * This function prepares the hardware power domain state before entering
3057 * system suspend. It must be paired with intel_power_domains_init_hw().
3058 */
3059void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3060{
Imre Deakd314cd42015-11-17 17:44:23 +02003061 /*
3062 * Even if power well support was disabled we still want to disable
3063 * power wells while we are system suspended.
3064 */
3065 if (!i915.disable_power_well)
3066 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Imre Deak2622d792016-02-29 22:49:02 +02003067
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07003068 if (IS_CANNONLAKE(dev_priv))
3069 cnl_display_core_uninit(dev_priv);
3070 else if (IS_GEN9_BC(dev_priv))
Imre Deak2622d792016-02-29 22:49:02 +02003071 skl_display_core_uninit(dev_priv);
Ander Conselvan de Oliveirab817c442016-12-02 10:23:56 +02003072 else if (IS_GEN9_LP(dev_priv))
Imre Deakd7d7c9e2016-04-01 16:02:42 +03003073 bxt_display_core_uninit(dev_priv);
Imre Deak73dfc222015-11-17 17:33:53 +02003074}
3075
Imre Deak8d8c3862017-02-17 17:39:46 +02003076static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3077{
3078 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3079 struct i915_power_well *power_well;
3080
3081 for_each_power_well(dev_priv, power_well) {
3082 enum intel_display_power_domain domain;
3083
3084 DRM_DEBUG_DRIVER("%-25s %d\n",
3085 power_well->name, power_well->count);
3086
3087 for_each_power_domain(domain, power_well->domains)
3088 DRM_DEBUG_DRIVER(" %-23s %d\n",
3089 intel_display_power_domain_str(domain),
3090 power_domains->domain_use_count[domain]);
3091 }
3092}
3093
3094/**
3095 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3096 * @dev_priv: i915 device instance
3097 *
3098 * Verify if the reference count of each power well matches its HW enabled
3099 * state and the total refcount of the domains it belongs to. This must be
3100 * called after modeset HW state sanitization, which is responsible for
3101 * acquiring reference counts for any power wells in use and disabling the
3102 * ones left on by BIOS but not required by any active output.
3103 */
3104void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3105{
3106 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3107 struct i915_power_well *power_well;
3108 bool dump_domain_info;
3109
3110 mutex_lock(&power_domains->lock);
3111
3112 dump_domain_info = false;
3113 for_each_power_well(dev_priv, power_well) {
3114 enum intel_display_power_domain domain;
3115 int domains_count;
3116 bool enabled;
3117
3118 /*
3119 * Power wells not belonging to any domain (like the MISC_IO
3120 * and PW1 power wells) are under FW control, so ignore them,
3121 * since their state can change asynchronously.
3122 */
3123 if (!power_well->domains)
3124 continue;
3125
3126 enabled = power_well->ops->is_enabled(dev_priv, power_well);
3127 if ((power_well->count || power_well->always_on) != enabled)
3128 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3129 power_well->name, power_well->count, enabled);
3130
3131 domains_count = 0;
3132 for_each_power_domain(domain, power_well->domains)
3133 domains_count += power_domains->domain_use_count[domain];
3134
3135 if (power_well->count != domains_count) {
3136 DRM_ERROR("power well %s refcount/domain refcount mismatch "
3137 "(refcount %d/domains refcount %d)\n",
3138 power_well->name, power_well->count,
3139 domains_count);
3140 dump_domain_info = true;
3141 }
3142 }
3143
3144 if (dump_domain_info) {
3145 static bool dumped;
3146
3147 if (!dumped) {
3148 intel_power_domains_dump_info(dev_priv);
3149 dumped = true;
3150 }
3151 }
3152
3153 mutex_unlock(&power_domains->lock);
3154}
3155
Imre Deak73dfc222015-11-17 17:33:53 +02003156/**
Daniel Vettere4e76842014-09-30 10:56:42 +02003157 * intel_runtime_pm_get - grab a runtime pm reference
3158 * @dev_priv: i915 device instance
3159 *
3160 * This function grabs a device-level runtime pm reference (mostly used for GEM
3161 * code to ensure the GTT or GT is on) and ensures that it is powered up.
3162 *
3163 * Any runtime pm reference obtained by this function must have a symmetric
3164 * call to intel_runtime_pm_put() to release the reference again.
3165 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003166void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3167{
David Weinehall52a05c32016-08-22 13:32:44 +03003168 struct pci_dev *pdev = dev_priv->drm.pdev;
3169 struct device *kdev = &pdev->dev;
Imre Deakf5073822017-03-28 12:38:55 +03003170 int ret;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003171
Imre Deakf5073822017-03-28 12:38:55 +03003172 ret = pm_runtime_get_sync(kdev);
3173 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
Imre Deak1f814da2015-12-16 02:52:19 +02003174
3175 atomic_inc(&dev_priv->pm.wakeref_count);
Imre Deakc9b88462015-12-15 20:10:34 +02003176 assert_rpm_wakelock_held(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003177}
3178
Daniel Vettere4e76842014-09-30 10:56:42 +02003179/**
Imre Deak09731282016-02-17 14:17:42 +02003180 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3181 * @dev_priv: i915 device instance
3182 *
3183 * This function grabs a device-level runtime pm reference if the device is
3184 * already in use and ensures that it is powered up.
3185 *
3186 * Any runtime pm reference obtained by this function must have a symmetric
3187 * call to intel_runtime_pm_put() to release the reference again.
3188 */
3189bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3190{
David Weinehall52a05c32016-08-22 13:32:44 +03003191 struct pci_dev *pdev = dev_priv->drm.pdev;
3192 struct device *kdev = &pdev->dev;
Imre Deak09731282016-02-17 14:17:42 +02003193
Chris Wilson135dc792016-02-25 21:10:28 +00003194 if (IS_ENABLED(CONFIG_PM)) {
David Weinehallc49d13e2016-08-22 13:32:42 +03003195 int ret = pm_runtime_get_if_in_use(kdev);
Imre Deak09731282016-02-17 14:17:42 +02003196
Chris Wilson135dc792016-02-25 21:10:28 +00003197 /*
3198 * In cases runtime PM is disabled by the RPM core and we get
3199 * an -EINVAL return value we are not supposed to call this
3200 * function, since the power state is undefined. This applies
3201 * atm to the late/early system suspend/resume handlers.
3202 */
Imre Deakf5073822017-03-28 12:38:55 +03003203 WARN_ONCE(ret < 0,
3204 "pm_runtime_get_if_in_use() failed: %d\n", ret);
Chris Wilson135dc792016-02-25 21:10:28 +00003205 if (ret <= 0)
3206 return false;
3207 }
Imre Deak09731282016-02-17 14:17:42 +02003208
3209 atomic_inc(&dev_priv->pm.wakeref_count);
3210 assert_rpm_wakelock_held(dev_priv);
3211
3212 return true;
3213}
3214
3215/**
Daniel Vettere4e76842014-09-30 10:56:42 +02003216 * intel_runtime_pm_get_noresume - grab a runtime pm reference
3217 * @dev_priv: i915 device instance
3218 *
3219 * This function grabs a device-level runtime pm reference (mostly used for GEM
3220 * code to ensure the GTT or GT is on).
3221 *
3222 * It will _not_ power up the device but instead only check that it's powered
3223 * on. Therefore it is only valid to call this functions from contexts where
3224 * the device is known to be powered up and where trying to power it up would
3225 * result in hilarity and deadlocks. That pretty much means only the system
3226 * suspend/resume code where this is used to grab runtime pm references for
3227 * delayed setup down in work items.
3228 *
3229 * Any runtime pm reference obtained by this function must have a symmetric
3230 * call to intel_runtime_pm_put() to release the reference again.
3231 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003232void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3233{
David Weinehall52a05c32016-08-22 13:32:44 +03003234 struct pci_dev *pdev = dev_priv->drm.pdev;
3235 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003236
Imre Deakc9b88462015-12-15 20:10:34 +02003237 assert_rpm_wakelock_held(dev_priv);
David Weinehallc49d13e2016-08-22 13:32:42 +03003238 pm_runtime_get_noresume(kdev);
Imre Deak1f814da2015-12-16 02:52:19 +02003239
3240 atomic_inc(&dev_priv->pm.wakeref_count);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003241}
3242
Daniel Vettere4e76842014-09-30 10:56:42 +02003243/**
3244 * intel_runtime_pm_put - release a runtime pm reference
3245 * @dev_priv: i915 device instance
3246 *
3247 * This function drops the device-level runtime pm reference obtained by
3248 * intel_runtime_pm_get() and might power down the corresponding
3249 * hardware block right away if this is the last reference.
3250 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003251void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3252{
David Weinehall52a05c32016-08-22 13:32:44 +03003253 struct pci_dev *pdev = dev_priv->drm.pdev;
3254 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003255
Imre Deak542db3c2015-12-15 20:10:36 +02003256 assert_rpm_wakelock_held(dev_priv);
Chris Wilson2eedfc72016-10-24 13:42:17 +01003257 atomic_dec(&dev_priv->pm.wakeref_count);
Imre Deak1f814da2015-12-16 02:52:19 +02003258
David Weinehallc49d13e2016-08-22 13:32:42 +03003259 pm_runtime_mark_last_busy(kdev);
3260 pm_runtime_put_autosuspend(kdev);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003261}
3262
Daniel Vettere4e76842014-09-30 10:56:42 +02003263/**
3264 * intel_runtime_pm_enable - enable runtime pm
3265 * @dev_priv: i915 device instance
3266 *
3267 * This function enables runtime pm at the end of the driver load sequence.
3268 *
3269 * Note that this function does currently not enable runtime pm for the
3270 * subordinate display power domains. That is only done on the first modeset
3271 * using intel_display_set_init_power().
3272 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003273void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02003274{
David Weinehall52a05c32016-08-22 13:32:44 +03003275 struct pci_dev *pdev = dev_priv->drm.pdev;
David Weinehall52a05c32016-08-22 13:32:44 +03003276 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003277
David Weinehallc49d13e2016-08-22 13:32:42 +03003278 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3279 pm_runtime_mark_last_busy(kdev);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003280
Imre Deak25b181b2015-12-17 13:44:56 +02003281 /*
3282 * Take a permanent reference to disable the RPM functionality and drop
3283 * it only when unloading the driver. Use the low level get/put helpers,
3284 * so the driver's own RPM reference tracking asserts also work on
3285 * platforms without RPM support.
3286 */
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01003287 if (!HAS_RUNTIME_PM(dev_priv)) {
Imre Deakf5073822017-03-28 12:38:55 +03003288 int ret;
3289
David Weinehallc49d13e2016-08-22 13:32:42 +03003290 pm_runtime_dont_use_autosuspend(kdev);
Imre Deakf5073822017-03-28 12:38:55 +03003291 ret = pm_runtime_get_sync(kdev);
3292 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003293 } else {
David Weinehallc49d13e2016-08-22 13:32:42 +03003294 pm_runtime_use_autosuspend(kdev);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003295 }
Daniel Vetter9c065a72014-09-30 10:56:38 +02003296
Imre Deakaabee1b2015-12-15 20:10:29 +02003297 /*
3298 * The core calls the driver load handler with an RPM reference held.
3299 * We drop that here and will reacquire it during unloading in
3300 * intel_power_domains_fini().
3301 */
David Weinehallc49d13e2016-08-22 13:32:42 +03003302 pm_runtime_put_autosuspend(kdev);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003303}