blob: 35e0cb60b0acc1e0f52b20d35d9cb675a884f4a3 [file] [log] [blame]
Daniel Vetter9c065a72014-09-30 10:56:38 +02001/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
Daniel Vetter9c065a72014-09-30 10:56:38 +020034
Daniel Vettere4e76842014-09-30 10:56:42 +020035/**
36 * DOC: runtime pm
37 *
38 * The i915 driver supports dynamic enabling and disabling of entire hardware
39 * blocks at runtime. This is especially important on the display side where
40 * software is supposed to control many power gates manually on recent hardware,
41 * since on the GT side a lot of the power management is done by the hardware.
42 * But even there some manual control at the device level is required.
43 *
44 * Since i915 supports a diverse set of platforms with a unified codebase and
45 * hardware engineers just love to shuffle functionality around between power
46 * domains there's a sizeable amount of indirection required. This file provides
47 * generic functions to the driver for grabbing and releasing references for
48 * abstract power domains. It then maps those to the actual power wells
49 * present for a given platform.
50 */
51
/*
 * Iterate (in array order) the power wells of @power_domains whose domain
 * mask intersects @domain_mask. @i and @power_well are the loop cursors.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

/*
 * Same as for_each_power_well(), but walking the well array in reverse --
 * the order used when powering wells back down.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			   \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							   \
		if ((power_well)->domains & (domain_mask))
64
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	/* Both our enable request and the hardware state bit must be set. */
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
76
Daniel Vettere4e76842014-09-30 10:56:42 +020077/**
78 * __intel_display_power_is_enabled - unlocked check for a power domain
79 * @dev_priv: i915 device instance
80 * @domain: power domain to check
81 *
82 * This is the unlocked version of intel_display_power_is_enabled() and should
83 * only be used from error capture and recovery code where deadlocks are
84 * possible.
85 *
86 * Returns:
87 * True when the power domain is enabled, false otherwise.
88 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +020089bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
90 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +020091{
92 struct i915_power_domains *power_domains;
93 struct i915_power_well *power_well;
94 bool is_enabled;
95 int i;
96
97 if (dev_priv->pm.suspended)
98 return false;
99
100 power_domains = &dev_priv->power_domains;
101
102 is_enabled = true;
103
104 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
105 if (power_well->always_on)
106 continue;
107
108 if (!power_well->hw_enabled) {
109 is_enabled = false;
110 break;
111 }
112 }
113
114 return is_enabled;
115}
116
Daniel Vettere4e76842014-09-30 10:56:42 +0200117/**
Damien Lespiauf61ccae2014-11-25 13:45:41 +0000118 * intel_display_power_is_enabled - check for a power domain
Daniel Vettere4e76842014-09-30 10:56:42 +0200119 * @dev_priv: i915 device instance
120 * @domain: power domain to check
121 *
122 * This function can be used to check the hw power domain state. It is mostly
123 * used in hardware state readout functions. Everywhere else code should rely
124 * upon explicit power domain reference counting to ensure that the hardware
125 * block is powered up before accessing it.
126 *
127 * Callers must hold the relevant modesetting locks to ensure that concurrent
128 * threads can't disable the power well while the caller tries to read a few
129 * registers.
130 *
131 * Returns:
132 * True when the power domain is enabled, false otherwise.
133 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200134bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
135 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +0200136{
137 struct i915_power_domains *power_domains;
138 bool ret;
139
140 power_domains = &dev_priv->power_domains;
141
142 mutex_lock(&power_domains->lock);
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200143 ret = __intel_display_power_is_enabled(dev_priv, domain);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200144 mutex_unlock(&power_domains->lock);
145
146 return ret;
147}
148
Daniel Vettere4e76842014-09-30 10:56:42 +0200149/**
150 * intel_display_set_init_power - set the initial power domain state
151 * @dev_priv: i915 device instance
152 * @enable: whether to enable or disable the initial power domain state
153 *
154 * For simplicity our driver load/unload and system suspend/resume code assumes
155 * that all power domains are always enabled. This functions controls the state
156 * of this little hack. While the initial power domain state is enabled runtime
157 * pm is effectively disabled.
158 */
Daniel Vetterd9bc89d92014-09-30 10:56:40 +0200159void intel_display_set_init_power(struct drm_i915_private *dev_priv,
160 bool enable)
161{
162 if (dev_priv->power_domains.init_power_on == enable)
163 return;
164
165 if (enable)
166 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
167 else
168 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
169
170 dev_priv->power_domains.init_power_on = enable;
171}
172
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Restore pipe B/C interrupt state lost while the well was down. */
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
201
/*
 * Program the driver's request bit in HSW_PWR_WELL_DRIVER and, when enabling,
 * wait for the hardware to report the well as powered before restoring the
 * state that lives behind it.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			/*
			 * Only drop our request; the well actually powers
			 * down once no requester (BIOS/KVMR/debug) wants it.
			 */
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
233
/* Domains backed by SKL power well 2 (PG2): pipes B/C and the port/aux IO. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* PG1 feeds PG2, so it covers everything PG2 does plus pipe A / eDP / DDI A. */
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
/* Per-DDI IO wells. */
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
/* Misc IO well mirrors the PG1 domain set. */
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
/* Whatever is not claimed by a gated well above stays always-on. */
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))
291
/*
 * Program the request bit for a SKL power well and wait for the matching
 * state bit. PG1/PG2 additionally have power-distribution fuse status that
 * must be verified before (and, on enable, after) flipping the request.
 */
static void skl_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		/* PG0 distribution must be up before touching PG1. */
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		/* PG2 sits below PG1 in the power hierarchy. */
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		/* DDI/MISC IO wells have no fuse prerequisite. */
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	state_mask = SKL_POWER_WELL_STATE(power_well->data);

	if (enable) {
		if (!(tmp & req_mask)) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
		}

		if (!(tmp & state_mask)) {
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			/* Fuse distribution status is re-checked below. */
			check_fuse_status = true;
		}
	} else {
		if (tmp & req_mask) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}
}
363
Daniel Vetter9c065a72014-09-30 10:56:38 +0200364static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
365 struct i915_power_well *power_well)
366{
367 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
368
369 /*
370 * We're taking over the BIOS, so clear any requests made by it since
371 * the driver is in charge now.
372 */
373 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
374 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
375}
376
/* Power-well op: turn the HSW/BDW display power well on. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
382
/* Power-well op: drop our request for the HSW/BDW display power well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
388
Satheeshakrishna M94dd5132015-02-04 13:57:44 +0000389static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
390 struct i915_power_well *power_well)
391{
392 uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
393 SKL_POWER_WELL_STATE(power_well->data);
394
395 return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
396}
397
/*
 * Sync the well's hardware state with the software refcount on takeover.
 */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
406
/* Power-well op: turn a SKL power well on. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
412
/* Power-well op: turn a SKL power well off. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
418
/* Always-on wells need no hardware programming; all state ops are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
423
/* An always-on well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
429
430static void vlv_set_power_well(struct drm_i915_private *dev_priv,
431 struct i915_power_well *power_well, bool enable)
432{
433 enum punit_power_well power_well_id = power_well->data;
434 u32 mask;
435 u32 state;
436 u32 ctrl;
437
438 mask = PUNIT_PWRGT_MASK(power_well_id);
439 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
440 PUNIT_PWRGT_PWR_GATE(power_well_id);
441
442 mutex_lock(&dev_priv->rps.hw_lock);
443
444#define COND \
445 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
446
447 if (COND)
448 goto out;
449
450 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
451 ctrl &= ~mask;
452 ctrl |= state;
453 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
454
455 if (wait_for(COND, 100))
456 DRM_ERROR("timout setting power well state %08x (%08x)\n",
457 state,
458 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
459
460#undef COND
461
462out:
463 mutex_unlock(&dev_priv->rps.hw_lock);
464}
465
/* Sync the VLV well's hardware state with the software refcount. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
471
/* Power-well op: turn a VLV power well on via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
477
/* Power-well op: gate a VLV power well via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
483
/*
 * Query the Punit for the well's state, warning if the status is neither
 * fully on nor fully gated, or if the control bits disagree with the status
 * (which would indicate somebody else poking at the power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
519
/*
 * The DISP2D well powers the display block on VLV: after it comes up,
 * re-enable display interrupts and restore state that lives behind the well.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	/* Re-arm hotplug detection and bring back VGA if it was on. */
	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}
542
/*
 * Counterpart of vlv_display_power_well_enable(): quiesce display interrupts
 * before the well goes down, then reset the panel power sequencer tracking
 * (its register state presumably does not survive the well -- see
 * vlv_power_sequencer_reset()).
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
556
/*
 * Power up the DPIO BC common lane block: start the CRI clock before the
 * well, then de-assert common reset afterwards per the VLV sequence notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 * b. The other bits such as sfr settings / modesel may all
	 * be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
586
/*
 * Power down the DPIO BC common lane block. All PLLs must already be off;
 * common reset is asserted before the well is gated.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
602
/*
 * Power up a CHV DPIO common lane block (PHY0 for the BC well, PHY1 for the
 * D well): start the CRI clock, enable the well, poll for power-good, then
 * de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
		   PHY_COM_LANE_RESET_DEASSERT(phy));
}
637
/*
 * Power down a CHV DPIO common lane block: assert the common lane reset for
 * the PHY (after verifying the PLLs feeding it are off), then gate the well.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		/* PHY0 serves pipes A and B. */
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
660
/*
 * Read a CHV pipe well's state from the Punit DSPFREQ register, warning if
 * the status is neither fully on nor fully gated, or if the control bits
 * disagree with the status.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
689
690static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
691 struct i915_power_well *power_well,
692 bool enable)
693{
694 enum pipe pipe = power_well->data;
695 u32 state;
696 u32 ctrl;
697
698 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
699
700 mutex_lock(&dev_priv->rps.hw_lock);
701
702#define COND \
703 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
704
705 if (COND)
706 goto out;
707
708 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
709 ctrl &= ~DP_SSC_MASK(pipe);
710 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
711 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
712
713 if (wait_for(COND, 100))
714 DRM_ERROR("timout setting power well state %08x (%08x)\n",
715 state,
716 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
717
718#undef COND
719
720out:
721 mutex_unlock(&dev_priv->rps.hw_lock);
722}
723
/* Sync the CHV pipe well's hardware state with the software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
729
/*
 * Enable a CHV pipe power well. For pipe A this mirrors VLV's DISP2D well:
 * display interrupts are re-enabled and hpd/VGA state restored afterwards.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_enable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		/*
		 * During driver initialization/resume we can avoid restoring the
		 * part of the HW/SW state that will be inited anyway explicitly.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}
756
/*
 * Disable a CHV pipe power well. For pipe A, display interrupts are masked
 * first and the panel power sequencer bookkeeping is reset afterwards,
 * mirroring VLV's DISP2D disable path.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		spin_lock_irq(&dev_priv->irq_lock);
		valleyview_disable_display_irqs(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}
775
Daniel Vettere4e76842014-09-30 10:56:42 +0200776/**
777 * intel_display_power_get - grab a power domain reference
778 * @dev_priv: i915 device instance
779 * @domain: power domain to reference
780 *
781 * This function grabs a power domain reference for @domain and ensures that the
782 * power domain and all its parents are powered up. Therefore users should only
783 * grab a reference to the innermost power domain they need.
784 *
785 * Any power domain reference obtained by this function must have a symmetric
786 * call to intel_display_power_put() to release the reference again.
787 */
Daniel Vetter9c065a72014-09-30 10:56:38 +0200788void intel_display_power_get(struct drm_i915_private *dev_priv,
789 enum intel_display_power_domain domain)
790{
791 struct i915_power_domains *power_domains;
792 struct i915_power_well *power_well;
793 int i;
794
795 intel_runtime_pm_get(dev_priv);
796
797 power_domains = &dev_priv->power_domains;
798
799 mutex_lock(&power_domains->lock);
800
801 for_each_power_well(i, power_well, BIT(domain), power_domains) {
802 if (!power_well->count++) {
803 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
804 power_well->ops->enable(dev_priv, power_well);
805 power_well->hw_enabled = true;
806 }
Daniel Vetter9c065a72014-09-30 10:56:38 +0200807 }
808
809 power_domains->domain_use_count[domain]++;
810
811 mutex_unlock(&power_domains->lock);
812}
813
Daniel Vettere4e76842014-09-30 10:56:42 +0200814/**
815 * intel_display_power_put - release a power domain reference
816 * @dev_priv: i915 device instance
817 * @domain: power domain to reference
818 *
819 * This function drops the power domain reference obtained by
820 * intel_display_power_get() and might power down the corresponding hardware
821 * block right away if this is the last reference.
822 */
Daniel Vetter9c065a72014-09-30 10:56:38 +0200823void intel_display_power_put(struct drm_i915_private *dev_priv,
824 enum intel_display_power_domain domain)
825{
826 struct i915_power_domains *power_domains;
827 struct i915_power_well *power_well;
828 int i;
829
830 power_domains = &dev_priv->power_domains;
831
832 mutex_lock(&power_domains->lock);
833
834 WARN_ON(!power_domains->domain_use_count[domain]);
835 power_domains->domain_use_count[domain]--;
836
837 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
838 WARN_ON(!power_well->count);
839
840 if (!--power_well->count && i915.disable_power_well) {
841 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
842 power_well->hw_enabled = false;
843 power_well->ops->disable(dev_priv, power_well);
844 }
Daniel Vetter9c065a72014-09-30 10:56:38 +0200845 }
846
847 mutex_unlock(&power_domains->lock);
848
849 intel_runtime_pm_put(dev_priv);
850}
851
852#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
853
854#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
855 BIT(POWER_DOMAIN_PIPE_A) | \
856 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
857 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
858 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
859 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
860 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
861 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
862 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
863 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
864 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
865 BIT(POWER_DOMAIN_PORT_CRT) | \
866 BIT(POWER_DOMAIN_PLLS) | \
Satheeshakrishna M14071212015-01-16 15:57:51 +0000867 BIT(POWER_DOMAIN_AUX_A) | \
868 BIT(POWER_DOMAIN_AUX_B) | \
869 BIT(POWER_DOMAIN_AUX_C) | \
870 BIT(POWER_DOMAIN_AUX_D) | \
Daniel Vetter9c065a72014-09-30 10:56:38 +0200871 BIT(POWER_DOMAIN_INIT))
872#define HSW_DISPLAY_POWER_DOMAINS ( \
873 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
874 BIT(POWER_DOMAIN_INIT))
875
876#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
877 HSW_ALWAYS_ON_POWER_DOMAINS | \
878 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
879#define BDW_DISPLAY_POWER_DOMAINS ( \
880 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
881 BIT(POWER_DOMAIN_INIT))
882
883#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
884#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
885
/* Domains served by the shared DPIO common lane well for PHY B/C. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/*
 * Per-lane TX wells: lanes 0/1 are needed for both the 2-lane and
 * 4-lane port configurations, lanes 2/3 only for the 4-lane ones.
 */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))
917
/* CHV has a power well per pipe. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

/*
 * CHV DPIO common lane wells: one for PHY B/C (note: no PORT_CRT,
 * unlike the VLV equivalent above) and one for PHY D.
 */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

/* Port D TX lane wells, analogous to the VLV B/C lane wells above. */
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
955
/*
 * Ops for wells that are always powered: enable/disable/sync are no-ops
 * and the well always reports itself as enabled.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};
962
/* Ops for the per-pipe power wells on CHV. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};
969
/*
 * Ops for the CHV DPIO common lane wells; state queries and hw sync are
 * shared with VLV, only enable/disable are CHV specific.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
976
/*
 * Fallback table for platforms without controllable display power wells:
 * a single always-on well covering every power domain.
 */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
985
/* Ops for the HSW/BDW display power well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
992
/* Ops shared by all SKL display power wells. */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};
999
/* HSW: one always-on well plus the single "display" well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
1013
/* BDW: same layout as HSW, only the domain masks differ. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
1027
/* Ops for the VLV DISP2D (display) power well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1034
/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1041
/* Generic punit-controlled well ops, used for the DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1048
/*
 * VLV power well list. Array order matters: wells are enabled from
 * lower to higher index and disabled in reverse (see
 * intel_power_domains_init()). .data holds the punit power well id.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	/*
	 * Each TX lane well is tied to the union of all B and C lane
	 * domains, so all four wells power up/down together.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	/* Common lane well last: enabled last, disabled first. */
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
1105
/*
 * CHV power well list. Several entries are compiled out with "#if 0"
 * while the pipe A well effectively acts as the display well.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
#endif
	{
		.name = "pipe-a",
		/*
		 * FIXME: pipe A power well seems to be the new disp2d well.
		 * At least all registers seem to be housed there. Figure
		 * out if this is a temporary situation in pre-production
		 * hardware or a permanent state of affairs.
		 */
		.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
#if 0
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
1214
1215static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1216 enum punit_power_well power_well_id)
1217{
1218 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1219 struct i915_power_well *power_well;
1220 int i;
1221
1222 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1223 if (power_well->data == power_well_id)
1224 return power_well;
1225 }
1226
1227 return NULL;
1228}
1229
/*
 * SKL power well list: always-on set, power wells 1/2, MISC IO and one
 * well per DDI. .data holds the SKL_DISP_PW_* id used by the ops.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
1280
/*
 * Install a platform power well table; must be passed an actual array
 * (not a pointer) so ARRAY_SIZE() yields the element count.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
1285
Daniel Vettere4e76842014-09-30 10:56:42 +02001286/**
1287 * intel_power_domains_init - initializes the power domain structures
1288 * @dev_priv: i915 device instance
1289 *
1290 * Initializes the power domain structures for @dev_priv depending upon the
1291 * supported platform.
1292 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001293int intel_power_domains_init(struct drm_i915_private *dev_priv)
1294{
1295 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1296
1297 mutex_init(&power_domains->lock);
1298
1299 /*
1300 * The enabling order will be from lower to higher indexed wells,
1301 * the disabling order is reversed.
1302 */
1303 if (IS_HASWELL(dev_priv->dev)) {
1304 set_power_wells(power_domains, hsw_power_wells);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001305 } else if (IS_BROADWELL(dev_priv->dev)) {
1306 set_power_wells(power_domains, bdw_power_wells);
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00001307 } else if (IS_SKYLAKE(dev_priv->dev)) {
1308 set_power_wells(power_domains, skl_power_wells);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001309 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1310 set_power_wells(power_domains, chv_power_wells);
1311 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
1312 set_power_wells(power_domains, vlv_power_wells);
1313 } else {
1314 set_power_wells(power_domains, i9xx_always_on_power_well);
1315 }
1316
1317 return 0;
1318}
1319
Daniel Vetter41373cd2014-09-30 10:56:41 +02001320static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1321{
1322 struct drm_device *dev = dev_priv->dev;
1323 struct device *device = &dev->pdev->dev;
1324
1325 if (!HAS_RUNTIME_PM(dev))
1326 return;
1327
1328 if (!intel_enable_rc6(dev))
1329 return;
1330
1331 /* Make sure we're not suspended first. */
1332 pm_runtime_get_sync(device);
1333 pm_runtime_disable(device);
1334}
1335
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}
1353
1354static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1355{
1356 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1357 struct i915_power_well *power_well;
1358 int i;
1359
1360 mutex_lock(&power_domains->lock);
1361 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1362 power_well->ops->sync_hw(dev_priv, power_well);
1363 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1364 power_well);
1365 }
1366 mutex_unlock(&power_domains->lock);
1367}
1368
1369static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1370{
1371 struct i915_power_well *cmn =
1372 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1373 struct i915_power_well *disp2d =
1374 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
1375
Daniel Vetter9c065a72014-09-30 10:56:38 +02001376 /* If the display might be already active skip this */
Ville Syrjälä5d93a6e2014-10-16 20:52:33 +03001377 if (cmn->ops->is_enabled(dev_priv, cmn) &&
1378 disp2d->ops->is_enabled(dev_priv, disp2d) &&
Daniel Vetter9c065a72014-09-30 10:56:38 +02001379 I915_READ(DPIO_CTL) & DPIO_CMNRST)
1380 return;
1381
1382 DRM_DEBUG_KMS("toggling display PHY side reset\n");
1383
1384 /* cmnlane needs DPLL registers */
1385 disp2d->ops->enable(dev_priv, disp2d);
1386
1387 /*
1388 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1389 * Need to assert and de-assert PHY SB reset by gating the
1390 * common lane power, then un-gating it.
1391 * Simply ungating isn't enough to reset the PHY enough to get
1392 * ports and lanes running.
1393 */
1394 cmn->ops->disable(dev_priv, cmn);
1395}
1396
Daniel Vettere4e76842014-09-30 10:56:42 +02001397/**
1398 * intel_power_domains_init_hw - initialize hardware power domain state
1399 * @dev_priv: i915 device instance
1400 *
1401 * This function initializes the hardware power domain state and enables all
1402 * power domains using intel_display_set_init_power().
1403 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001404void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1405{
1406 struct drm_device *dev = dev_priv->dev;
1407 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1408
1409 power_domains->initializing = true;
1410
1411 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1412 mutex_lock(&power_domains->lock);
1413 vlv_cmnlane_wa(dev_priv);
1414 mutex_unlock(&power_domains->lock);
1415 }
1416
1417 /* For now, we need the power well to be always enabled. */
1418 intel_display_set_init_power(dev_priv, true);
1419 intel_power_domains_resume(dev_priv);
1420 power_domains->initializing = false;
1421}
1422
/**
 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a power domain reference for the auxiliary power domain
 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
 * parents are powered up. Therefore users should only grab a reference to the
 * innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_aux_display_runtime_put() to release the reference again.
 */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	/* Currently just a device-level runtime pm reference. */
	intel_runtime_pm_get(dev_priv);
}
1439
/**
 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 * @dev_priv: i915 device instance
 *
 * This function drops the auxiliary power domain reference obtained by
 * intel_aux_display_runtime_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	/* Mirrors intel_aux_display_runtime_get(). */
	intel_runtime_pm_put(dev_priv);
}
1452
Daniel Vettere4e76842014-09-30 10:56:42 +02001453/**
1454 * intel_runtime_pm_get - grab a runtime pm reference
1455 * @dev_priv: i915 device instance
1456 *
1457 * This function grabs a device-level runtime pm reference (mostly used for GEM
1458 * code to ensure the GTT or GT is on) and ensures that it is powered up.
1459 *
1460 * Any runtime pm reference obtained by this function must have a symmetric
1461 * call to intel_runtime_pm_put() to release the reference again.
1462 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001463void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1464{
1465 struct drm_device *dev = dev_priv->dev;
1466 struct device *device = &dev->pdev->dev;
1467
1468 if (!HAS_RUNTIME_PM(dev))
1469 return;
1470
1471 pm_runtime_get_sync(device);
1472 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1473}
1474
Daniel Vettere4e76842014-09-30 10:56:42 +02001475/**
1476 * intel_runtime_pm_get_noresume - grab a runtime pm reference
1477 * @dev_priv: i915 device instance
1478 *
1479 * This function grabs a device-level runtime pm reference (mostly used for GEM
1480 * code to ensure the GTT or GT is on).
1481 *
1482 * It will _not_ power up the device but instead only check that it's powered
1483 * on. Therefore it is only valid to call this functions from contexts where
1484 * the device is known to be powered up and where trying to power it up would
1485 * result in hilarity and deadlocks. That pretty much means only the system
1486 * suspend/resume code where this is used to grab runtime pm references for
1487 * delayed setup down in work items.
1488 *
1489 * Any runtime pm reference obtained by this function must have a symmetric
1490 * call to intel_runtime_pm_put() to release the reference again.
1491 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001492void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1493{
1494 struct drm_device *dev = dev_priv->dev;
1495 struct device *device = &dev->pdev->dev;
1496
1497 if (!HAS_RUNTIME_PM(dev))
1498 return;
1499
1500 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1501 pm_runtime_get_noresume(device);
1502}
1503
Daniel Vettere4e76842014-09-30 10:56:42 +02001504/**
1505 * intel_runtime_pm_put - release a runtime pm reference
1506 * @dev_priv: i915 device instance
1507 *
1508 * This function drops the device-level runtime pm reference obtained by
1509 * intel_runtime_pm_get() and might power down the corresponding
1510 * hardware block right away if this is the last reference.
1511 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001512void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1513{
1514 struct drm_device *dev = dev_priv->dev;
1515 struct device *device = &dev->pdev->dev;
1516
1517 if (!HAS_RUNTIME_PM(dev))
1518 return;
1519
1520 pm_runtime_mark_last_busy(device);
1521 pm_runtime_put_autosuspend(device);
1522}
1523
Daniel Vettere4e76842014-09-30 10:56:42 +02001524/**
1525 * intel_runtime_pm_enable - enable runtime pm
1526 * @dev_priv: i915 device instance
1527 *
1528 * This function enables runtime pm at the end of the driver load sequence.
1529 *
1530 * Note that this function does currently not enable runtime pm for the
1531 * subordinate display power domains. That is only done on the first modeset
1532 * using intel_display_set_init_power().
1533 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +02001534void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02001535{
1536 struct drm_device *dev = dev_priv->dev;
1537 struct device *device = &dev->pdev->dev;
1538
1539 if (!HAS_RUNTIME_PM(dev))
1540 return;
1541
1542 pm_runtime_set_active(device);
1543
1544 /*
1545 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
1546 * requirement.
1547 */
1548 if (!intel_enable_rc6(dev)) {
1549 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1550 return;
1551 }
1552
1553 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1554 pm_runtime_mark_last_busy(device);
1555 pm_runtime_use_autosuspend(device);
1556
1557 pm_runtime_put_autosuspend(device);
1558}
1559