blob: dc63b7890eeffd9b3fa33fd7a5b4098ccafdc54d [file] [log] [blame]
Daniel Vetter9c065a72014-09-30 10:56:38 +02001/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include <drm/i915_powerwell.h>
35
/* Cached pointer to the power-domains struct on HSW/BDW; set in
 * intel_power_domains_init(), cleared in intel_power_domains_fini().
 * NOTE(review): presumably consumed via the <drm/i915_powerwell.h>
 * interface included above — confirm against that header's users. */
static struct i915_power_domains *hsw_pwr;
37
/*
 * Iterate over all power wells in @power_domains in increasing index
 * order, executing the (attached) loop body only for wells whose domain
 * bitmask intersects @domain_mask. Note the trailing if-statement: the
 * caller's loop body binds to it.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))
44
/*
 * Same as for_each_power_well(), but iterating from the highest index
 * down to 0 — used on the disable path so wells are shut down in the
 * reverse of their enable order.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]); \
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
50
51/**
52 * We should only use the power well if we explicitly asked the hardware to
53 * enable it, so check if it's enabled and also check if we've requested it to
54 * be enabled.
55 */
56static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
57 struct i915_power_well *power_well)
58{
59 return I915_READ(HSW_PWR_WELL_DRIVER) ==
60 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
61}
62
Daniel Vetterf458ebb2014-09-30 10:56:39 +020063bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
64 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +020065{
66 struct i915_power_domains *power_domains;
67 struct i915_power_well *power_well;
68 bool is_enabled;
69 int i;
70
71 if (dev_priv->pm.suspended)
72 return false;
73
74 power_domains = &dev_priv->power_domains;
75
76 is_enabled = true;
77
78 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
79 if (power_well->always_on)
80 continue;
81
82 if (!power_well->hw_enabled) {
83 is_enabled = false;
84 break;
85 }
86 }
87
88 return is_enabled;
89}
90
Daniel Vetterf458ebb2014-09-30 10:56:39 +020091bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
92 enum intel_display_power_domain domain)
Daniel Vetter9c065a72014-09-30 10:56:38 +020093{
94 struct i915_power_domains *power_domains;
95 bool ret;
96
97 power_domains = &dev_priv->power_domains;
98
99 mutex_lock(&power_domains->lock);
Daniel Vetterf458ebb2014-09-30 10:56:39 +0200100 ret = __intel_display_power_is_enabled(dev_priv, domain);
Daniel Vetter9c065a72014-09-30 10:56:38 +0200101 mutex_unlock(&power_domains->lock);
102
103 return ret;
104}
105
Daniel Vetterd9bc89d92014-09-30 10:56:40 +0200106void intel_display_set_init_power(struct drm_i915_private *dev_priv,
107 bool enable)
108{
109 if (dev_priv->power_domains.init_power_on == enable)
110 return;
111
112 if (enable)
113 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
114 else
115 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
116
117 dev_priv->power_domains.init_power_on = enable;
118}
119
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */

/* Fixups that must run right after the HSW/BDW display well comes up. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So to avoid that, here we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* BDW and gen9+ need extra interrupt re-setup after the well powers
	 * up (see gen8_irq_power_well_post_enable). */
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv);
}
147
/*
 * Set the HSW/BDW display power well state through the driver request
 * register. Enabling writes our request bit and waits for the hardware
 * to report the well up; disabling only clears our request — per the
 * comment above, the well stays up while any other requester still
 * wants it.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			/* Bounded wait for the HW state bit to latch. */
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		/* Run post-enable fixups even if the well was already up. */
		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
179
/* ->sync_hw: bring the HW well state in line with our software refcount
 * and take ownership from the BIOS. */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

/* ->enable hook: request the HSW/BDW display well on. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

/* ->disable hook: drop our request for the HSW/BDW display well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

/* No-op hook for always-on wells: there is no hardware to program. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
215
216static void vlv_set_power_well(struct drm_i915_private *dev_priv,
217 struct i915_power_well *power_well, bool enable)
218{
219 enum punit_power_well power_well_id = power_well->data;
220 u32 mask;
221 u32 state;
222 u32 ctrl;
223
224 mask = PUNIT_PWRGT_MASK(power_well_id);
225 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
226 PUNIT_PWRGT_PWR_GATE(power_well_id);
227
228 mutex_lock(&dev_priv->rps.hw_lock);
229
230#define COND \
231 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
232
233 if (COND)
234 goto out;
235
236 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
237 ctrl &= ~mask;
238 ctrl |= state;
239 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
240
241 if (wait_for(COND, 100))
242 DRM_ERROR("timout setting power well state %08x (%08x)\n",
243 state,
244 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
245
246#undef COND
247
248out:
249 mutex_unlock(&dev_priv->rps.hw_lock);
250}
251
/* ->sync_hw: program the VLV well to match our software refcount. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

/* ->enable hook for VLV punit-controlled wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

/* ->disable hook for VLV punit-controlled wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
269
/*
 * ->is_enabled for VLV punit wells: read the power-gate status and
 * report whether the well is on, warning if the status or control
 * registers show anything other than the two states we ever program.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
305
/*
 * Enable the VLV DISP2D well, then re-enable display IRQs and (outside
 * of init/resume) restore hotplug and VGA state that the well power-down
 * lost.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}
328
/*
 * Disable the VLV DISP2D well: display IRQs are masked first so nothing
 * fires while the well goes down, and the panel power sequencer state is
 * reset afterwards since it lives in the well.
 * NOTE(review): "PPS state lives in the well" is inferred from the
 * vlv_power_sequencer_reset() call — confirm against that helper.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}
342
/*
 * Enable the VLV DPIO common-lane (BC) well. Order matters: the CRI
 * clock must be running before the well powers up, and cmn_reset is
 * de-asserted only afterwards.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
372
/*
 * Disable the VLV DPIO common-lane (BC) well. All PLLs must already be
 * off (asserted below), then common reset is asserted before the well
 * is powered down.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
388
389static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
390 struct i915_power_well *power_well)
391{
392 enum dpio_phy phy;
393
394 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
395 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
396
397 /*
398 * Enable the CRI clock source so we can get at the
399 * display and the reference clock for VGA
400 * hotplug / manual detection.
401 */
402 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
403 phy = DPIO_PHY0;
404 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
405 DPLL_REFA_CLK_ENABLE_VLV);
406 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
407 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
408 } else {
409 phy = DPIO_PHY1;
410 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
411 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
412 }
413 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
414 vlv_set_power_well(dev_priv, power_well, true);
415
416 /* Poll for phypwrgood signal */
417 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
418 DRM_ERROR("Display PHY %d is not power up\n", phy);
419
420 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
421 PHY_COM_LANE_RESET_DEASSERT(phy));
422}
423
/*
 * Disable a CHV DPIO common-lane well: verify the PLLs feeding that PHY
 * are already off, assert the common-lane reset, then power the well
 * down.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
		   ~PHY_COM_LANE_RESET_DEASSERT(phy));

	vlv_set_power_well(dev_priv, power_well, false);
}
446
/*
 * ->is_enabled for CHV per-pipe wells: read the DSPFREQ status bits for
 * the pipe, warning if either the status or the (shifted) control field
 * shows a state we never program.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* Control bits sit 16 below the status bits, hence the shift. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
475
476static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
477 struct i915_power_well *power_well,
478 bool enable)
479{
480 enum pipe pipe = power_well->data;
481 u32 state;
482 u32 ctrl;
483
484 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
485
486 mutex_lock(&dev_priv->rps.hw_lock);
487
488#define COND \
489 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
490
491 if (COND)
492 goto out;
493
494 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
495 ctrl &= ~DP_SSC_MASK(pipe);
496 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
497 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
498
499 if (wait_for(COND, 100))
500 DRM_ERROR("timout setting power well state %08x (%08x)\n",
501 state,
502 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
503
504#undef COND
505
506out:
507 mutex_unlock(&dev_priv->rps.hw_lock);
508}
509
/* ->sync_hw: program the CHV pipe well to match our software refcount. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

/* ->enable hook for CHV per-pipe wells. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);
}

/* ->disable hook for CHV per-pipe wells. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
535
536static void check_power_well_state(struct drm_i915_private *dev_priv,
537 struct i915_power_well *power_well)
538{
539 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
540
541 if (power_well->always_on || !i915.disable_power_well) {
542 if (!enabled)
543 goto mismatch;
544
545 return;
546 }
547
548 if (enabled != (power_well->count > 0))
549 goto mismatch;
550
551 return;
552
553mismatch:
554 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
555 power_well->name, power_well->always_on, enabled,
556 power_well->count, i915.disable_power_well);
557}
558
/*
 * Take a reference on all power wells backing @domain, enabling any
 * well whose refcount goes 0->1. Also grabs a runtime PM reference, so
 * every call must be balanced by intel_display_power_put().
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	/* Keep the device awake while the domain reference is held. */
	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Enable in increasing index order (disable is the reverse). */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
586
/*
 * Drop a reference taken by intel_display_power_get(). Wells whose
 * refcount hits zero are powered down (in reverse order) only when the
 * i915.disable_power_well modparam allows it. Releases the paired
 * runtime PM reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	/* Catch unbalanced put calls. */
	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
617
/* Bitmask covering every power domain. */
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

/* Domains that never lose power on HSW; everything else sits behind the
 * single "display" power well. */
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* BDW additionally keeps pipe A's panel fitter always on. */
#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

/* On VLV only the INIT domain is always on; the display well backs
 * every domain. */
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

/* Per-lane-pair TX well masks for ports B and C. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV per-pipe well masks. */
#define CHV_PIPE_A_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_A) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_B_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_B) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_PIPE_C_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PIPE_C) |	\
	BIT(POWER_DOMAIN_INIT))

/* CHV DPIO common-lane masks: PHY0 serves ports B/C, PHY1 serves D. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
706
/* No-op well ops for always-on wells (nothing to program). */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe power well ops (DSPFREQ-based). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common-lane wells: CHV-specific enable/disable, shared VLV
 * punit helpers for sync_hw and is_enabled. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
727
/* Platforms without controllable wells get a single always-on well
 * covering every domain. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

/* HSW/BDW display power well ops (request/status register based). */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
743
/* HSW: one always-on well plus the single "display" power-down well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, different always-on domain set. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
771
/* VLV DISP2D well: punit-based with extra IRQ/HPD/VGA handling around
 * the power transition. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common-lane well: wraps the punit toggle with CRI clock and
 * cmn_reset sequencing. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Plain punit-controlled wells (DPIO TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
792
/*
 * VLV power well list. Order matters: wells are enabled low-to-high
 * index and disabled in reverse. Note every TX lane well claims all
 * four lane-pair domain masks — they are powered as a group.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	/* Last in the list so it is the first well to be disabled. */
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
849
/*
 * CHV power well list. Several entries are compiled out with #if 0 —
 * only the always-on well and the two DPIO common-lane wells are
 * currently used.
 */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
#if 0
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "pipe-a",
		.domains = CHV_PIPE_A_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-b",
		.domains = CHV_PIPE_B_POWER_DOMAINS,
		.data = PIPE_B,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "pipe-c",
		.domains = CHV_PIPE_C_POWER_DOMAINS,
		.data = PIPE_C,
		.ops = &chv_pipe_power_well_ops,
	},
#endif
	{
		.name = "dpio-common-bc",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		/*
		 * XXX: cmnreset for one PHY seems to disturb the other.
		 * As a workaround keep both powered on at the same
		 * time for now.
		 */
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
#if 0
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-tx-d-01",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
	},
	{
		.name = "dpio-tx-d-23",
		.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
			   CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
	},
#endif
};
950
951static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
952 enum punit_power_well power_well_id)
953{
954 struct i915_power_domains *power_domains = &dev_priv->power_domains;
955 struct i915_power_well *power_well;
956 int i;
957
958 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
959 if (power_well->data == power_well_id)
960 return power_well;
961 }
962
963 return NULL;
964}
965
/* Install a platform's power-well array (must be a real array so
 * ARRAY_SIZE works) into @power_domains. */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
970
/*
 * Select and install the platform's power-well table and initialize the
 * power-domains lock. Always returns 0.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		/* NOTE(review): CHV is checked before VLV — presumably the
		 * VLV check would also match CHV; confirm before reordering. */
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
997
/*
 * Tear down runtime PM: wake the device, then disable runtime PM on it.
 * No-op when the platform lacks runtime PM support or RC6 is disabled
 * (mirroring the conditions under which runtime PM was enabled).
 */
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
1013
/*
 * Driver-unload counterpart of intel_power_domains_init(): disable
 * runtime PM, force the init power domain on, and drop the cached
 * HSW/BDW power-domains pointer.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);

	hsw_pwr = NULL;
}
1025
1026static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1027{
1028 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1029 struct i915_power_well *power_well;
1030 int i;
1031
1032 mutex_lock(&power_domains->lock);
1033 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1034 power_well->ops->sync_hw(dev_priv, power_well);
1035 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1036 power_well);
1037 }
1038 mutex_unlock(&power_domains->lock);
1039}
1040
/*
 * vlv_cmnlane_wa - VLV display PHY side reset workaround
 *
 * If the DPIO common lane power well was left enabled (e.g. by the BIOS)
 * without an active display keeping the PHY out of reset, power gate it so
 * that the next ungate performs a full PHY reset.
 *
 * NOTE(review): the matching re-enable of the common lane well presumably
 * happens later through the normal power well handling — confirm.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
1071
/*
 * intel_power_domains_init_hw - bring the power domain hardware to a known
 * state at driver load/resume
 * @dev_priv: i915 device instance
 *
 * Applies the VLV common lane workaround, forces the init power domain on
 * and syncs the software power well state with the hardware.
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* NOTE(review): presumably relaxes power well sanity checks elsewhere
	 * while the initial state is being set up — confirm. */
	power_domains->initializing = true;

	/* VLV only; the !IS_CHERRYVIEW() check excludes CHV, which would
	 * otherwise also match IS_VALLEYVIEW(). */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
1090
/* Thin wrapper: take a runtime PM reference for aux display usage. */
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}
1095
/* Thin wrapper: drop the runtime PM reference taken by
 * intel_aux_display_runtime_get(). */
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
1100
1101void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1102{
1103 struct drm_device *dev = dev_priv->dev;
1104 struct device *device = &dev->pdev->dev;
1105
1106 if (!HAS_RUNTIME_PM(dev))
1107 return;
1108
1109 pm_runtime_get_sync(device);
1110 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1111}
1112
1113void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1114{
1115 struct drm_device *dev = dev_priv->dev;
1116 struct device *device = &dev->pdev->dev;
1117
1118 if (!HAS_RUNTIME_PM(dev))
1119 return;
1120
1121 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1122 pm_runtime_get_noresume(device);
1123}
1124
1125void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1126{
1127 struct drm_device *dev = dev_priv->dev;
1128 struct device *device = &dev->pdev->dev;
1129
1130 if (!HAS_RUNTIME_PM(dev))
1131 return;
1132
1133 pm_runtime_mark_last_busy(device);
1134 pm_runtime_put_autosuspend(device);
1135}
1136
Daniel Vetterf458ebb2014-09-30 10:56:39 +02001137void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02001138{
1139 struct drm_device *dev = dev_priv->dev;
1140 struct device *device = &dev->pdev->dev;
1141
1142 if (!HAS_RUNTIME_PM(dev))
1143 return;
1144
1145 pm_runtime_set_active(device);
1146
1147 /*
1148 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
1149 * requirement.
1150 */
1151 if (!intel_enable_rc6(dev)) {
1152 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1153 return;
1154 }
1155
1156 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1157 pm_runtime_mark_last_busy(device);
1158 pm_runtime_use_autosuspend(device);
1159
1160 pm_runtime_put_autosuspend(device);
1161}
1162
Daniel Vetter9c065a72014-09-30 10:56:38 +02001163/* Display audio driver power well request */
1164int i915_request_power_well(void)
1165{
1166 struct drm_i915_private *dev_priv;
1167
1168 if (!hsw_pwr)
1169 return -ENODEV;
1170
1171 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1172 power_domains);
1173 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
1174 return 0;
1175}
1176EXPORT_SYMBOL_GPL(i915_request_power_well);
1177
1178/* Display audio driver power well release */
1179int i915_release_power_well(void)
1180{
1181 struct drm_i915_private *dev_priv;
1182
1183 if (!hsw_pwr)
1184 return -ENODEV;
1185
1186 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1187 power_domains);
1188 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
1189 return 0;
1190}
1191EXPORT_SYMBOL_GPL(i915_release_power_well);
1192
1193/*
1194 * Private interface for the audio driver to get CDCLK in kHz.
1195 *
1196 * Caller must request power well using i915_request_power_well() prior to
1197 * making the call.
1198 */
1199int i915_get_cdclk_freq(void)
1200{
1201 struct drm_i915_private *dev_priv;
1202
1203 if (!hsw_pwr)
1204 return -ENODEV;
1205
1206 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1207 power_domains);
1208
1209 return intel_ddi_get_cdclk_freq(dev_priv);
1210}
1211EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);