blob: 2f7f0ab363fb25a451ffdc491c5e61e0a57a5526 [file] [log] [blame]
Daniel Vetter9c065a72014-09-30 10:56:38 +02001/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
Daniel Vetter9c065a72014-09-30 10:56:38 +020034
Daniel Vettere4e76842014-09-30 10:56:42 +020035/**
36 * DOC: runtime pm
37 *
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side where
40 * software is supposed to control many power gates manually on recent hardware,
41 * since on the GT side a lot of the power management is done by the hardware.
42 * But even there some manual control at the device level is required.
43 *
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
50 */
51
/* DC5 entry is only supported on Skylake in this code. */
#define GEN9_ENABLE_DC5(dev) (IS_SKYLAKE(dev))

/*
 * Iterate over the power wells of @power_domains in array order, visiting
 * only those whose domain bitmask intersects @domain_mask.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

/* Same as for_each_power_well(), but iterating in reverse order. */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			\
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							\
		if ((power_well)->domains & (domain_mask))

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id);
69
Daniel Vettere4e76842014-09-30 10:56:42 +020070/*
Daniel Vetter9c065a72014-09-30 10:56:38 +020071 * We should only use the power well if we explicitly asked the hardware to
72 * enable it, so check if it's enabled and also check if we've requested it to
73 * be enabled.
74 */
75static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
76 struct i915_power_well *power_well)
77{
78 return I915_READ(HSW_PWR_WELL_DRIVER) ==
79 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
80}
81
Daniel Vettere4e76842014-09-30 10:56:42 +020082/**
83 * __intel_display_power_is_enabled - unlocked check for a power domain
84 * @dev_priv: i915 device instance
85 * @domain: power domain to check
86 *
87 * This is the unlocked version of intel_display_power_is_enabled() and should
88 * only be used from error capture and recovery code where deadlocks are
89 * possible.
90 *
91 * Returns:
92 * True when the power domain is enabled, false otherwise.
93 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +020094bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
95 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +020096{
97 struct i915_power_domains *power_domains;
98 struct i915_power_well *power_well;
99 bool is_enabled;
100 int i;
101
102 if (dev_priv->pm.suspended)
103 return false;
104
105 power_domains = &dev_priv->power_domains;
106
107 is_enabled = true;
108
109 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
110 if (power_well->always_on)
111 continue;
112
113 if (!power_well->hw_enabled) {
114 is_enabled = false;
115 break;
116 }
117 }
118
119 return is_enabled;
120}
121
Daniel Vettere4e76842014-09-30 10:56:42 +0200122/**
Damien Lespiauf61ccae2014-11-25 13:45:41 +0000123 * intel_display_power_is_enabled - check for a power domain
Daniel Vettere4e76842014-09-30 10:56:42 +0200124 * @dev_priv: i915 device instance
125 * @domain: power domain to check
126 *
127 * This function can be used to check the hw power domain state. It is mostly
128 * used in hardware state readout functions. Everywhere else code should rely
129 * upon explicit power domain reference counting to ensure that the hardware
130 * block is powered up before accessing it.
131 *
132 * Callers must hold the relevant modesetting locks to ensure that concurrent
133 * threads can't disable the power well while the caller tries to read a few
134 * registers.
135 *
136 * Returns:
137 * True when the power domain is enabled, false otherwise.
138 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200139bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
140 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +0200141{
142 struct i915_power_domains *power_domains;
143 bool ret;
144
145 power_domains = &dev_priv->power_domains;
146
147 mutex_lock(&power_domains->lock);
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200148 ret = __intel_display_power_is_enabled(dev_priv, domain);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200149 mutex_unlock(&power_domains->lock);
150
151 return ret;
152}
153
Daniel Vettere4e76842014-09-30 10:56:42 +0200154/**
155 * intel_display_set_init_power - set the initial power domain state
156 * @dev_priv: i915 device instance
157 * @enable: whether to enable or disable the initial power domain state
158 *
159 * For simplicity our driver load/unload and system suspend/resume code assumes
160 * that all power domains are always enabled. This functions controls the state
161 * of this little hack. While the initial power domain state is enabled runtime
162 * pm is effectively disabled.
163 */
Daniel Vetterd9bc89d92014-09-30 10:56:40 +0200164void intel_display_set_init_power(struct drm_i915_private *dev_priv,
165 bool enable)
166{
167 if (dev_priv->power_domains.init_power_on == enable)
168 return;
169
170 if (enable)
171 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
172 else
173 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
174
175 dev_priv->power_domains.init_power_on = enable;
176}
177
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Re-enable the pipe B/C interrupts that were lost with the well. */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
206
/* Post-enable fixups for the SKL display power wells (PW1 and PW2). */
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		/* Pipe B/C interrupt state was lost with the well. */
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	/* PW1 covers pipe A and the DDI buffers. */
	if (power_well->data == SKL_DISP_PW_1) {
		intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}
236
Daniel Vetter9c065a72014-09-30 10:56:38 +0200237static void hsw_set_power_well(struct drm_i915_private *dev_priv,
238 struct i915_power_well *power_well, bool enable)
239{
240 bool is_enabled, enable_requested;
241 uint32_t tmp;
242
243 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
244 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
245 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
246
247 if (enable) {
248 if (!enable_requested)
249 I915_WRITE(HSW_PWR_WELL_DRIVER,
250 HSW_PWR_WELL_ENABLE_REQUEST);
251
252 if (!is_enabled) {
253 DRM_DEBUG_KMS("Enabling power well\n");
254 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
255 HSW_PWR_WELL_STATE_ENABLED), 20))
256 DRM_ERROR("Timeout enabling power well\n");
Paulo Zanoni6d729bf2014-10-07 16:11:11 -0300257 hsw_power_well_post_enable(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200258 }
259
Daniel Vetter9c065a72014-09-30 10:56:38 +0200260 } else {
261 if (enable_requested) {
262 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
263 POSTING_READ(HSW_PWR_WELL_DRIVER);
264 DRM_DEBUG_KMS("Requesting to disable the power well\n");
265 }
266 }
267}
268
/*
 * Domain masks for the SKL display power wells. Each mask lists every power
 * domain a given well covers; POWER_DOMAIN_INIT is included everywhere so
 * init code can keep all wells up. PW1 is a superset of PW2.
 */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUX_D) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_PLLS) | \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
/* Everything not covered by any of the wells above is always on. */
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS | \
	SKL_DISPLAY_DDI_B_POWER_DOMAINS | \
	SKL_DISPLAY_DDI_C_POWER_DOMAINS | \
	SKL_DISPLAY_DDI_D_POWER_DOMAINS | \
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
	BIT(POWER_DOMAIN_INIT))
326
/*
 * Domain masks for the BXT display power wells; PW1 is a superset of PW2,
 * and everything not covered by either well is always on.
 */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
	BIT(POWER_DOMAIN_TRANSCODER_A) | \
	BIT(POWER_DOMAIN_PIPE_B) | \
	BIT(POWER_DOMAIN_TRANSCODER_B) | \
	BIT(POWER_DOMAIN_PIPE_C) | \
	BIT(POWER_DOMAIN_TRANSCODER_C) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
	BIT(POWER_DOMAIN_AUX_B) | \
	BIT(POWER_DOMAIN_AUX_C) | \
	BIT(POWER_DOMAIN_AUDIO) | \
	BIT(POWER_DOMAIN_VGA) | \
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
	BIT(POWER_DOMAIN_PIPE_A) | \
	BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
	BIT(POWER_DOMAIN_AUX_A) | \
	BIT(POWER_DOMAIN_PLLS) | \
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
	BIT(POWER_DOMAIN_INIT))
358
/* Sanity-check (via WARNs) the preconditions for entering DC9. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
379
/* Sanity-check (via WARNs) the preconditions for leaving DC9. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
396
397void bxt_enable_dc9(struct drm_i915_private *dev_priv)
398{
399 uint32_t val;
400
401 assert_can_enable_dc9(dev_priv);
402
403 DRM_DEBUG_KMS("Enabling DC9\n");
404
405 val = I915_READ(DC_STATE_EN);
406 val |= DC_STATE_EN_DC9;
407 I915_WRITE(DC_STATE_EN, val);
408 POSTING_READ(DC_STATE_EN);
409}
410
411void bxt_disable_dc9(struct drm_i915_private *dev_priv)
412{
413 uint32_t val;
414
415 assert_can_disable_dc9(dev_priv);
416
417 DRM_DEBUG_KMS("Disabling DC9\n");
418
419 val = I915_READ(DC_STATE_EN);
420 val &= ~DC_STATE_EN_DC9;
421 I915_WRITE(DC_STATE_EN, val);
422 POSTING_READ(DC_STATE_EN);
423}
424
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530425static void gen9_set_dc_state_debugmask_memory_up(
426 struct drm_i915_private *dev_priv)
427{
428 uint32_t val;
429
430 /* The below bit doesn't need to be cleared ever afterwards */
431 val = I915_READ(DC_STATE_DEBUG);
432 if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
433 val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
434 I915_WRITE(DC_STATE_DEBUG, val);
435 POSTING_READ(DC_STATE_DEBUG);
436 }
437}
438
Suketu Shah5aefb232015-04-16 14:22:10 +0530439static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
Suketu Shahdc174302015-04-17 19:46:16 +0530440{
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530441 struct drm_device *dev = dev_priv->dev;
Suketu Shah5aefb232015-04-16 14:22:10 +0530442 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
443 SKL_DISP_PW_2);
444
445 WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
446 WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
447 WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");
448
449 WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
450 "DC5 already programmed to be enabled.\n");
451 WARN(dev_priv->pm.suspended,
452 "DC5 cannot be enabled, if platform is runtime-suspended.\n");
453
454 assert_csr_loaded(dev_priv);
455}
456
457static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
458{
459 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
460 SKL_DISP_PW_2);
461
462 WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
463 WARN(dev_priv->pm.suspended,
464 "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
465}
466
467static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
468{
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530469 uint32_t val;
470
Suketu Shah5aefb232015-04-16 14:22:10 +0530471 assert_can_enable_dc5(dev_priv);
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530472
473 DRM_DEBUG_KMS("Enabling DC5\n");
474
475 gen9_set_dc_state_debugmask_memory_up(dev_priv);
476
477 val = I915_READ(DC_STATE_EN);
478 val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
479 val |= DC_STATE_EN_UPTO_DC5;
480 I915_WRITE(DC_STATE_EN, val);
481 POSTING_READ(DC_STATE_EN);
Suketu Shahdc174302015-04-17 19:46:16 +0530482}
483
484static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
485{
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530486 uint32_t val;
487
Suketu Shah5aefb232015-04-16 14:22:10 +0530488 assert_can_disable_dc5(dev_priv);
A.Sunil Kamath6b457d32015-04-16 14:22:09 +0530489
490 DRM_DEBUG_KMS("Disabling DC5\n");
491
492 val = I915_READ(DC_STATE_EN);
493 val &= ~DC_STATE_EN_UPTO_DC5;
494 I915_WRITE(DC_STATE_EN, val);
495 POSTING_READ(DC_STATE_EN);
Suketu Shahdc174302015-04-17 19:46:16 +0530496}
497
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000498static void skl_set_power_well(struct drm_i915_private *dev_priv,
499 struct i915_power_well *power_well, bool enable)
500{
Suketu Shahdc174302015-04-17 19:46:16 +0530501 struct drm_device *dev = dev_priv->dev;
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000502 uint32_t tmp, fuse_status;
503 uint32_t req_mask, state_mask;
Damien Lespiau2a518352015-03-06 18:50:49 +0000504 bool is_enabled, enable_requested, check_fuse_status = false;
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000505
506 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
507 fuse_status = I915_READ(SKL_FUSE_STATUS);
508
509 switch (power_well->data) {
510 case SKL_DISP_PW_1:
511 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
512 SKL_FUSE_PG0_DIST_STATUS), 1)) {
513 DRM_ERROR("PG0 not enabled\n");
514 return;
515 }
516 break;
517 case SKL_DISP_PW_2:
518 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
519 DRM_ERROR("PG1 in disabled state\n");
520 return;
521 }
522 break;
523 case SKL_DISP_PW_DDI_A_E:
524 case SKL_DISP_PW_DDI_B:
525 case SKL_DISP_PW_DDI_C:
526 case SKL_DISP_PW_DDI_D:
527 case SKL_DISP_PW_MISC_IO:
528 break;
529 default:
530 WARN(1, "Unknown power well %lu\n", power_well->data);
531 return;
532 }
533
534 req_mask = SKL_POWER_WELL_REQ(power_well->data);
Damien Lespiau2a518352015-03-06 18:50:49 +0000535 enable_requested = tmp & req_mask;
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000536 state_mask = SKL_POWER_WELL_STATE(power_well->data);
Damien Lespiau2a518352015-03-06 18:50:49 +0000537 is_enabled = tmp & state_mask;
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000538
539 if (enable) {
Damien Lespiau2a518352015-03-06 18:50:49 +0000540 if (!enable_requested) {
Suketu Shahdc174302015-04-17 19:46:16 +0530541 WARN((tmp & state_mask) &&
542 !I915_READ(HSW_PWR_WELL_BIOS),
543 "Invalid for power well status to be enabled, unless done by the BIOS, \
544 when request is to disable!\n");
545 if (GEN9_ENABLE_DC5(dev) &&
546 power_well->data == SKL_DISP_PW_2)
547 gen9_disable_dc5(dev_priv);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000548 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000549 }
550
Damien Lespiau2a518352015-03-06 18:50:49 +0000551 if (!is_enabled) {
Damien Lespiau510e6fd2015-03-06 18:50:50 +0000552 DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000553 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
554 state_mask), 1))
555 DRM_ERROR("%s enable timeout\n",
556 power_well->name);
557 check_fuse_status = true;
558 }
559 } else {
Damien Lespiau2a518352015-03-06 18:50:49 +0000560 if (enable_requested) {
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000561 I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
562 POSTING_READ(HSW_PWR_WELL_DRIVER);
563 DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
Suketu Shahdc174302015-04-17 19:46:16 +0530564
565 if (GEN9_ENABLE_DC5(dev) &&
566 power_well->data == SKL_DISP_PW_2) {
567 enum csr_state state;
568
569 wait_for((state = intel_csr_load_status_get(dev_priv)) !=
570 FW_UNINITIALIZED, 1000);
571 if (state != FW_LOADED)
572 DRM_ERROR("CSR firmware not ready (%d)\n",
573 state);
574 else
575 gen9_enable_dc5(dev_priv);
576 }
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000577 }
578 }
579
580 if (check_fuse_status) {
581 if (power_well->data == SKL_DISP_PW_1) {
582 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
583 SKL_FUSE_PG1_DIST_STATUS), 1))
584 DRM_ERROR("PG1 distributing status timeout\n");
585 } else if (power_well->data == SKL_DISP_PW_2) {
586 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
587 SKL_FUSE_PG2_DIST_STATUS), 1))
588 DRM_ERROR("PG2 distributing status timeout\n");
589 }
590 }
Damien Lespiaud14c0342015-03-06 18:50:51 +0000591
592 if (enable && !is_enabled)
593 skl_power_well_post_enable(dev_priv, power_well);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000594}
595
Daniel Vetter9c065a72014-09-30 10:56:38 +0200596static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
597 struct i915_power_well *power_well)
598{
599 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
600
601 /*
602 * We're taking over the BIOS, so clear any requests made by it since
603 * the driver is in charge now.
604 */
605 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
606 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
607}
608
609static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
610 struct i915_power_well *power_well)
611{
612 hsw_set_power_well(dev_priv, power_well, true);
613}
614
615static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
616 struct i915_power_well *power_well)
617{
618 hsw_set_power_well(dev_priv, power_well, false);
619}
620
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000621static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
622 struct i915_power_well *power_well)
623{
624 uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
625 SKL_POWER_WELL_STATE(power_well->data);
626
627 return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
628}
629
630static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
631 struct i915_power_well *power_well)
632{
633 skl_set_power_well(dev_priv, power_well, power_well->count > 0);
634
635 /* Clear any request made by BIOS as driver is taking over */
636 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
637}
638
639static void skl_power_well_enable(struct drm_i915_private *dev_priv,
640 struct i915_power_well *power_well)
641{
642 skl_set_power_well(dev_priv, power_well, true);
643}
644
645static void skl_power_well_disable(struct drm_i915_private *dev_priv,
646 struct i915_power_well *power_well)
647{
648 skl_set_power_well(dev_priv, power_well, false);
649}
650
Daniel Vetter9c065a72014-09-30 10:56:38 +0200651static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
652 struct i915_power_well *power_well)
653{
654}
655
656static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
657 struct i915_power_well *power_well)
658{
659 return true;
660}
661
662static void vlv_set_power_well(struct drm_i915_private *dev_priv,
663 struct i915_power_well *power_well, bool enable)
664{
665 enum punit_power_well power_well_id = power_well->data;
666 u32 mask;
667 u32 state;
668 u32 ctrl;
669
670 mask = PUNIT_PWRGT_MASK(power_well_id);
671 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
672 PUNIT_PWRGT_PWR_GATE(power_well_id);
673
674 mutex_lock(&dev_priv->rps.hw_lock);
675
676#define COND \
677 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
678
679 if (COND)
680 goto out;
681
682 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
683 ctrl &= ~mask;
684 ctrl |= state;
685 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
686
687 if (wait_for(COND, 100))
688 DRM_ERROR("timout setting power well state %08x (%08x)\n",
689 state,
690 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
691
692#undef COND
693
694out:
695 mutex_unlock(&dev_priv->rps.hw_lock);
696}
697
698static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
699 struct i915_power_well *power_well)
700{
701 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
702}
703
704static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
705 struct i915_power_well *power_well)
706{
707 vlv_set_power_well(dev_priv, power_well, true);
708}
709
710static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
711 struct i915_power_well *power_well)
712{
713 vlv_set_power_well(dev_priv, power_well, false);
714}
715
716static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
717 struct i915_power_well *power_well)
718{
719 int power_well_id = power_well->data;
720 bool enabled = false;
721 u32 mask;
722 u32 state;
723 u32 ctrl;
724
725 mask = PUNIT_PWRGT_MASK(power_well_id);
726 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
727
728 mutex_lock(&dev_priv->rps.hw_lock);
729
730 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
731 /*
732 * We only ever set the power-on and power-gate states, anything
733 * else is unexpected.
734 */
735 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
736 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
737 if (state == ctrl)
738 enabled = true;
739
740 /*
741 * A transient state at this point would mean some unexpected party
742 * is poking at the power controls too.
743 */
744 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
745 WARN_ON(ctrl != state);
746
747 mutex_unlock(&dev_priv->rps.hw_lock);
748
749 return enabled;
750}
751
/*
 * Bring up the VLV display 2D well and restore the display state that
 * depends on it (irqs, hotplug, VGA). Ordering matters: the well must be
 * up before display interrupts are enabled.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
774
/*
 * Power down the VLV display 2D well. Display interrupts are disabled
 * first, and the power sequencer bookkeeping is reset afterwards —
 * NOTE(review): presumably PPS state is lost with the well; confirm.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
788
/*
 * Power up the VLV DPIO common lane well, following the documented
 * clock-enable / power-on / de-assert-reset ordering.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 * b. The other bits such as sfr settings / modesel may all
	 * be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
818
/*
 * Power down the VLV DPIO common lane well; all pipe PLLs must already
 * be disabled, and common reset is asserted before the well goes down.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
834
/*
 * Power up one of the two CHV DPIO common lane wells (PHY0 for ports B/C,
 * PHY1 for port D), then wait for the PHY power-good signal and de-assert
 * the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
869
/*
 * Power down a CHV DPIO common lane well.
 *
 * Asserts that all PLLs fed by the corresponding PHY are already off
 * (pipes A/B for PHY0, pipe C for PHY1), re-asserts the PHY common lane
 * reset, then gates the well via the Punit — the reverse of the enable
 * sequence above.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	/* Put the PHY back into common lane reset before cutting power. */
	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
892
/*
 * Query whether a CHV per-pipe power well is enabled.
 *
 * Reads the pipe's SSS (status) field from the Punit DSPFREQ register
 * under the Punit mailbox lock and cross-checks it against the SSC
 * (control) field, warning on any state this driver never requests.
 *
 * Returns true when the pipe well is powered on.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;	/* well->data holds the pipe id */
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);	/* SSC bits mirror SSS bits shifted by 16 */

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
921
922static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
923 struct i915_power_well *power_well,
924 bool enable)
925{
926 enum pipe pipe = power_well->data;
927 u32 state;
928 u32 ctrl;
929
930 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
931
932 mutex_lock(&dev_priv->rps.hw_lock);
933
934#define COND \
935 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
936
937 if (COND)
938 goto out;
939
940 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
941 ctrl &= ~DP_SSC_MASK(pipe);
942 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
943 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
944
945 if (wait_for(COND, 100))
946 DRM_ERROR("timout setting power well state %08x (%08x)\n",
947 state,
948 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
949
950#undef COND
951
952out:
953 mutex_unlock(&dev_priv->rps.hw_lock);
954}
955
956static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
957 struct i915_power_well *power_well)
958{
959 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
960}
961
/*
 * Enable a CHV per-pipe power well.
 *
 * The pipe A well carries extra display-wide state (it acts as the
 * disp2d well on this hardware — see the FIXME in chv_power_wells), so
 * powering it on also re-enables display IRQs and, outside of driver
 * init/resume, re-runs hotplug init and VGA redisable.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_enable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		/*
		 * During driver initialization/resume we can avoid restoring the
		 * part of the HW/SW state that will be inited anyway explicitly.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}
988
/*
 * Disable a CHV per-pipe power well.
 *
 * Mirror of chv_pipe_power_well_enable(): for pipe A the display IRQs
 * are shut off before the well is gated, and the panel power sequencer
 * state is reset afterwards since its registers live in this well.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}
1007
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Device-level runtime pm ref first; dropped again in _put(). */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/*
	 * Walk the wells matching this domain in table order (lower to
	 * higher index) and power on any well gaining its first user.
	 */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
1045
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put()s. */
	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	/*
	 * Disable in reverse table order, the mirror image of the enable
	 * order used by intel_display_power_get(). Wells are only
	 * actually powered down when the i915.disable_power_well module
	 * parameter allows it.
	 */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	/* Release the device-level ref taken in intel_display_power_get(). */
	intel_runtime_pm_put(dev_priv);
}
1083
/* Bitmask covering every power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/*
 * Per-platform domain sets. "ALWAYS_ON" domains are served by the
 * software-only always-on well; the rest map onto real hardware wells.
 * Every set includes POWER_DOMAIN_INIT so init-time code can hold all
 * wells via that single domain.
 */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_INIT))
/* Everything not always-on belongs to the single HSW display well. */
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW additionally keeps the pipe A panel fitter always on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* On VLV the disp2d well covers the whole display. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* VLV per-lane-pair TX wells. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV per-pipe wells. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV DPIO common lane wells (PHY0 = ports B/C, PHY1 = port D). */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
1187
/* No-op ops for the software-only always-on well. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe wells, toggled through the Punit DSPFREQ register. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * CHV DPIO common lane wells: CHV-specific enable/disable handle the
 * PHY reset dance; status/sync reuse the generic VLV Punit helpers.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Platforms without controllable wells get this single catch-all well. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};
1231
/* HSW: one always-on well plus the single "display" hardware well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW with the BDW domain sets. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic Punit-controlled well (used for the DPIO TX lane wells). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	/*
	 * NOTE(review): every TX well below lists the union of all B and C
	 * lane domains, so touching any lane domain powers all four TX
	 * wells together — presumably deliberate; confirm before narrowing.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
1337
/*
 * CHV well table. Several entries are compiled out with #if 0 (kept for
 * reference) while the pipe A well doubles as the display well — see the
 * FIXME below.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this a a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
1446
1447static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
Suketu Shah5aefb232015-04-16 14:22:10 +05301448 int power_well_id)
Daniel Vetter9c065a72014-09-30 10:56:38 +02001449{
1450 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1451 struct i915_power_well *power_well;
1452 int i;
1453
1454 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1455 if (power_well->data == power_well_id)
1456 return power_well;
1457 }
1458
1459 return NULL;
1460}
1461
Suketu Shah5aefb232015-04-16 14:22:10 +05301462bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1463 int power_well_id)
1464{
1465 struct i915_power_well *power_well;
1466 bool ret;
1467
1468 power_well = lookup_power_well(dev_priv, power_well_id);
1469 ret = power_well->ops->is_enabled(dev_priv, power_well);
1470
1471 return ret;
1472}
1473
/*
 * SKL well table. PW1/MISC IO come first so they are enabled before,
 * and disabled after, PW2 and the DDI wells (table order determines
 * enable order; disable is the reverse).
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
1524
/* BXT reuses the SKL well ops/ids but has only PW1 and PW2. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};
1545
/*
 * Point @power_domains at a static well table. ARRAY_SIZE only works
 * here because __power_wells is always a true array, never a pointer.
 * Uses a GCC statement expression (kernel code, gcc/clang only).
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
1550
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/*
		 * CHV must be checked before VLV: CHV devices also satisfy
		 * IS_VALLEYVIEW (see the IS_VALLEYVIEW && !IS_CHERRYVIEW
		 * test in intel_power_domains_init_hw()).
		 */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		/* No controllable wells on this platform. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
1586
/*
 * Disable runtime pm at driver teardown, leaving the device powered on.
 *
 * NOTE(review): the intel_enable_rc6() early return presumably mirrors
 * the condition under which runtime pm was enabled in the first place,
 * so there is nothing to undo without rc6 — confirm against
 * intel_runtime_pm_enable().
 */
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
1602
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}
1620
/*
 * Re-synchronize every power well's hardware state with its software
 * reference counts after init/resume, and record the resulting
 * hw_enabled status as reported by the well itself.
 */
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}
1635
/*
 * VLV common lane workaround: force a full PHY side reset at init by
 * power-cycling the DPIO common lane well, unless the display is already
 * up and the PHY is out of reset.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
1663
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/*
	 * Let the well handlers (e.g. chv_pipe_power_well_enable) skip
	 * restoring state that init will set up explicitly anyway.
	 */
	power_domains->initializing = true;

	/* VLV only, not CHV (which also satisfies IS_VALLEYVIEW). */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
1689
/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently just a device-level runtime pm ref, no extra wells. */
	intel_runtime_pm_get(dev_priv);
}
1706
/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	/* Mirror of intel_aux_display_runtime_get(). */
	intel_runtime_pm_put(dev_priv);
}
1719
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Synchronously resumes the device if it was suspended. */
	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
1741
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on. Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Take the ref without resuming; caller guarantees device is on. */
	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
1770
Daniel Vettere4e76842014-09-30 10:56:42 +02001771/**
1772 * intel_runtime_pm_put - release a runtime pm reference
1773 * @dev_priv: i915 device instance
1774 *
1775 * This function drops the device-level runtime pm reference obtained by
1776 * intel_runtime_pm_get() and might power down the corresponding
1777 * hardware block right away if this is the last reference.
1778 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001779void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1780{
1781 struct drm_device *dev = dev_priv->dev;
1782 struct device *device = &dev->pdev->dev;
1783
1784 if (!HAS_RUNTIME_PM(dev))
1785 return;
1786
1787 pm_runtime_mark_last_busy(device);
1788 pm_runtime_put_autosuspend(device);
1789}
1790
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/* Tell the PM core the device is already powered up at this point. */
	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		/*
		 * Deliberate asymmetry: by skipping the final put below we
		 * keep an rpm reference held, so the device can never
		 * runtime-suspend — presumably the reference taken earlier
		 * in driver load; NOTE(review): confirm against the load path.
		 */
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	/* Drop the reference held over driver load; autosuspend may now kick in. */
	pm_runtime_put_autosuspend(device);
}
1826