/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
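
/*
 * As a rough usage sketch, display code that needs to touch, say, transcoder A
 * registers brackets the access with a reference on the matching abstract
 * domain:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_TRANSCODER_A);
 *	... read/write transcoder A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER_A);
 *
 * The domain-to-well mapping below then enables whichever power wells the
 * running platform needs for that domain.
 */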

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

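/*
 * Reference counting wrappers: the hardware power well is only enabled on the
 * 0 -> 1 count transition and disabled again on the 1 -> 0 transition.
 */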
static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}


static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					HSW_PWR_WELL_CTL_DRIVER(id),
					HSW_PWR_WELL_CTL_STATE(id),
					HSW_PWR_WELL_CTL_STATE(id),
					1));
}

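/*
 * Collect which of the four request registers (BIOS, driver, KVMR, debug)
 * currently ask for this power well, encoded as bits 0-3 of the return value.
 */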
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     enum i915_power_well_id id)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 ret;

	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;

	return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
			       HSW_PWR_WELL_CTL_STATE(id))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool wait_fuses = power_well->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = SKL_PW_TO_PG(id);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
		val = I915_READ(CNL_AUX_ANAOVRD1(id));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
				   power_well->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 val;

	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);

	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id = SKL_DISP_PW_2;

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
		  HSW_PWR_WELL_CTL_REQ(id),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

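/*
 * Mask of the DC state bits that are valid on this platform: DC5 everywhere,
 * plus DC9 on gen9 LP parts and DC6 on the others.
 */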
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

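/*
 * Program the requested DC state, limited to what the platform and firmware
 * allow, and warn if the DMC has changed the state behind the driver's back.
 */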
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
							       SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

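/*
 * Propagate a request left behind by the BIOS into the driver's own request
 * register before clearing the BIOS request, so the power well keeps its
 * current state across the handover.
 */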
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u32 tmp = I915_READ(DBUF_CTL);

	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
	     "Unexpected DBuf power state (0x%08x)\n", tmp);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

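/*
 * Request a power well state change from the Punit on VLV/CHV and poll the
 * status register until the firmware reports the requested state.
 */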
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}

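/*
 * Common display-side bring-up once the VLV/CHV display power well is on:
 * re-enable the CRI/ref clocks, clock gating, display interrupts, hotplug
 * detection and the CRT and panel power sequencer workarounds.
 */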
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *	b.	The other bits such as sfr settings / modesel may all
	 *		be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

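/*
 * Find the power well with the given ID in the platform's power well list,
 * or NULL if the platform doesn't have it.
 */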
Imre Deak438b8dc2017-07-11 23:42:30 +03001042static struct i915_power_well *
1043lookup_power_well(struct drm_i915_private *dev_priv,
1044 enum i915_power_well_id power_well_id)
Ville Syrjälä30142272015-07-08 23:46:01 +03001045{
1046 struct i915_power_domains *power_domains = &dev_priv->power_domains;
Ville Syrjälä30142272015-07-08 23:46:01 +03001047 int i;
1048
Imre Deakfc17f222015-11-04 19:24:11 +02001049 for (i = 0; i < power_domains->power_well_count; i++) {
1050 struct i915_power_well *power_well;
1051
1052 power_well = &power_domains->power_wells[i];
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001053 if (power_well->id == power_well_id)
Ville Syrjälä30142272015-07-08 23:46:01 +03001054 return power_well;
1055 }
1056
1057 return NULL;
1058}
1059
1060#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1061
1062static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1063{
1064 struct i915_power_well *cmn_bc =
1065 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1066 struct i915_power_well *cmn_d =
1067 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1068 u32 phy_control = dev_priv->chv_phy_control;
1069 u32 phy_status = 0;
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001070 u32 phy_status_mask = 0xffffffff;
Ville Syrjälä30142272015-07-08 23:46:01 +03001071
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001072 /*
1073 * The BIOS can leave the PHY is some weird state
1074 * where it doesn't fully power down some parts.
1075 * Disable the asserts until the PHY has been fully
1076 * reset (ie. the power well has been disabled at
1077 * least once).
1078 */
1079 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1080 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1081 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1082 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1083 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1084 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1085 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1086
1087 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1088 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1089 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1090 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1091
Ville Syrjälä30142272015-07-08 23:46:01 +03001092 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1093 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1094
1095 /* this assumes override is only used to enable lanes */
1096 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1097 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1098
1099 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1100 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1101
1102 /* CL1 is on whenever anything is on in either channel */
1103 if (BITS_SET(phy_control,
1104 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1105 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1106 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1107
1108 /*
1109 * The DPLLB check accounts for the pipe B + port A usage
1110 * with CL2 powered up but all the lanes in the second channel
1111 * powered down.
1112 */
1113 if (BITS_SET(phy_control,
1114 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1115 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1116 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1117
1118 if (BITS_SET(phy_control,
1119 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1120 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1121 if (BITS_SET(phy_control,
1122 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1123 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1124
1125 if (BITS_SET(phy_control,
1126 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1127 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1128 if (BITS_SET(phy_control,
1129 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1130 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1131 }
1132
1133 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1134 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1135
1136 /* this assumes override is only used to enable lanes */
1137 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1138 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1139
1140 if (BITS_SET(phy_control,
1141 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1142 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1143
1144 if (BITS_SET(phy_control,
1145 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1146 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1147 if (BITS_SET(phy_control,
1148 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1149 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1150 }
1151
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001152 phy_status &= phy_status_mask;
1153
Ville Syrjälä30142272015-07-08 23:46:01 +03001154 /*
1155 * The PHY may be busy with some initial calibration and whatnot,
1156 * so the power state can take a while to actually change.
1157 */
Chris Wilson919fcd52016-06-30 15:33:35 +01001158 if (intel_wait_for_register(dev_priv,
1159 DISPLAY_PHY_STATUS,
1160 phy_status_mask,
1161 phy_status,
1162 10))
1163 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1164 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1165 phy_status, dev_priv->chv_phy_control);
Ville Syrjälä30142272015-07-08 23:46:01 +03001166}
1167
1168#undef BITS_SET
1169
Daniel Vetter9c065a72014-09-30 10:56:38 +02001170static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1171 struct i915_power_well *power_well)
1172{
1173 enum dpio_phy phy;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001174 enum pipe pipe;
1175 uint32_t tmp;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001176
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001177 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1178 power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001179
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001180 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001181 pipe = PIPE_A;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001182 phy = DPIO_PHY0;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001183 } else {
1184 pipe = PIPE_C;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001185 phy = DPIO_PHY1;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001186 }
Ville Syrjälä5a8fbb72015-06-29 15:25:53 +03001187
1188 /* since ref/cri clock was enabled */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001189 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1190 vlv_set_power_well(dev_priv, power_well, true);
1191
1192 /* Poll for phypwrgood signal */
Chris Wilsonffebb832016-06-30 15:33:36 +01001193 if (intel_wait_for_register(dev_priv,
1194 DISPLAY_PHY_STATUS,
1195 PHY_POWERGOOD(phy),
1196 PHY_POWERGOOD(phy),
1197 1))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001198 DRM_ERROR("Display PHY %d is not power up\n", phy);
1199
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001200 mutex_lock(&dev_priv->sb_lock);
1201
1202 /* Enable dynamic power down */
1203 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
Ville Syrjäläee279212015-07-08 23:45:57 +03001204 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1205 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001206 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1207
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001208 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001209 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1210 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1211 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
Ville Syrjälä3e288782015-07-08 23:45:58 +03001212 } else {
1213 /*
1214 * Force the non-existing CL2 off. BXT does this
1215 * too, so maybe it saves some power even though
1216 * CL2 doesn't exist?
1217 */
1218 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1219 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1220 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001221 }
1222
1223 mutex_unlock(&dev_priv->sb_lock);
1224
Ville Syrjälä70722462015-04-10 18:21:28 +03001225 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1226 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001227
1228 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1229 phy, dev_priv->chv_phy_control);
Ville Syrjälä30142272015-07-08 23:46:01 +03001230
1231 assert_chv_phy_status(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001232}
1233
1234static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1235 struct i915_power_well *power_well)
1236{
1237 enum dpio_phy phy;
1238
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001239 WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1240 power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001241
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03001242 if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02001243 phy = DPIO_PHY0;
1244 assert_pll_disabled(dev_priv, PIPE_A);
1245 assert_pll_disabled(dev_priv, PIPE_B);
1246 } else {
1247 phy = DPIO_PHY1;
1248 assert_pll_disabled(dev_priv, PIPE_C);
1249 }
1250
Ville Syrjälä70722462015-04-10 18:21:28 +03001251 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1252 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001253
1254 vlv_set_power_well(dev_priv, power_well, false);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001255
1256 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1257 phy, dev_priv->chv_phy_control);
Ville Syrjälä30142272015-07-08 23:46:01 +03001258
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001259 /* PHY is fully reset now, so we can enable the PHY state asserts */
1260 dev_priv->chv_phy_assert[phy] = true;
1261
Ville Syrjälä30142272015-07-08 23:46:01 +03001262 assert_chv_phy_status(dev_priv);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001263}
1264
Ville Syrjälä6669e392015-07-08 23:46:00 +03001265static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1266 enum dpio_channel ch, bool override, unsigned int mask)
1267{
1268 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1269 u32 reg, val, expected, actual;
1270
Ville Syrjälä3be60de2015-09-08 18:05:45 +03001271 /*
1272 * The BIOS can leave the PHY is some weird state
1273 * where it doesn't fully power down some parts.
1274 * Disable the asserts until the PHY has been fully
1275 * reset (ie. the power well has been disabled at
1276 * least once).
1277 */
1278 if (!dev_priv->chv_phy_assert[phy])
1279 return;
1280
Ville Syrjälä6669e392015-07-08 23:46:00 +03001281 if (ch == DPIO_CH0)
1282 reg = _CHV_CMN_DW0_CH0;
1283 else
1284 reg = _CHV_CMN_DW6_CH1;
1285
1286 mutex_lock(&dev_priv->sb_lock);
1287 val = vlv_dpio_read(dev_priv, pipe, reg);
1288 mutex_unlock(&dev_priv->sb_lock);
1289
1290 /*
1291 * This assumes !override is only used when the port is disabled.
1292 * All lanes should power down even without the override when
1293 * the port is disabled.
1294 */
1295 if (!override || mask == 0xf) {
1296 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1297 /*
1298 * If CH1 common lane is not active anymore
1299 * (eg. for pipe B DPLL) the entire channel will
1300 * shut down, which causes the common lane registers
1301 * to read as 0. That means we can't actually check
1302 * the lane power down status bits, but as the entire
1303 * register reads as 0 it's a good indication that the
1304 * channel is indeed entirely powered down.
1305 */
1306 if (ch == DPIO_CH1 && val == 0)
1307 expected = 0;
1308 } else if (mask != 0x0) {
1309 expected = DPIO_ANYDL_POWERDOWN;
1310 } else {
1311 expected = 0;
1312 }
1313
1314 if (ch == DPIO_CH0)
1315 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1316 else
1317 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1318 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1319
1320 WARN(actual != expected,
1321 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1322 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1323 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1324 reg, val);
1325}
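
/*
 * Summary of the lane power-down state expected by the assert above. This
 * only restates the checks already made in assert_chv_phy_powergate() for
 * readability; it does not add new behaviour:
 *
 *	- override disabled, or all four lanes in the mask (mask == 0xf):
 *	  expect DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN (or an all-zero
 *	  register when the whole CH1 channel has powered down)
 *	- override enabled with a partial mask (mask != 0x0): expect only
 *	  DPIO_ANYDL_POWERDOWN
 *	- override enabled with an empty mask (mask == 0x0): expect neither bit
 */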
1326
Ville Syrjäläb0b33842015-07-08 23:45:55 +03001327bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1328 enum dpio_channel ch, bool override)
1329{
1330 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1331 bool was_override;
1332
1333 mutex_lock(&power_domains->lock);
1334
1335 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1336
1337 if (override == was_override)
1338 goto out;
1339
1340 if (override)
1341 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1342 else
1343 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1344
1345 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1346
1347 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1348 phy, ch, dev_priv->chv_phy_control);
1349
Ville Syrjälä30142272015-07-08 23:46:01 +03001350 assert_chv_phy_status(dev_priv);
1351
Ville Syrjäläb0b33842015-07-08 23:45:55 +03001352out:
1353 mutex_unlock(&power_domains->lock);
1354
1355 return was_override;
1356}
1357
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001358void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1359 bool override, unsigned int mask)
1360{
1361 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1362 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1363 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1364 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1365
1366 mutex_lock(&power_domains->lock);
1367
1368 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1369 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1370
1371 if (override)
1372 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1373 else
1374 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1375
1376 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1377
1378 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1379 phy, ch, mask, dev_priv->chv_phy_control);
1380
Ville Syrjälä30142272015-07-08 23:46:01 +03001381 assert_chv_phy_status(dev_priv);
1382
Ville Syrjälä6669e392015-07-08 23:46:00 +03001383 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1384
Ville Syrjäläe0fce782015-07-08 23:45:54 +03001385 mutex_unlock(&power_domains->lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001386}
1387
1388static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1389 struct i915_power_well *power_well)
1390{
Imre Deakf49193c2017-07-06 17:40:23 +03001391 enum pipe pipe = PIPE_A;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001392 bool enabled;
1393 u32 state, ctrl;
1394
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001395 mutex_lock(&dev_priv->pcu_lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001396
1397 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1398 /*
1399 * We only ever set the power-on and power-gate states, anything
1400 * else is unexpected.
1401 */
1402 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1403 enabled = state == DP_SSS_PWR_ON(pipe);
1404
1405 /*
1406 * A transient state at this point would mean some unexpected party
1407 * is poking at the power controls too.
1408 */
1409 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1410 WARN_ON(ctrl << 16 != state);
1411
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001412 mutex_unlock(&dev_priv->pcu_lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001413
1414 return enabled;
1415}
1416
1417static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1418 struct i915_power_well *power_well,
1419 bool enable)
1420{
Imre Deakf49193c2017-07-06 17:40:23 +03001421 enum pipe pipe = PIPE_A;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001422 u32 state;
1423 u32 ctrl;
1424
1425 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1426
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001427 mutex_lock(&dev_priv->pcu_lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001428
1429#define COND \
1430 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1431
1432 if (COND)
1433 goto out;
1434
1435 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1436 ctrl &= ~DP_SSC_MASK(pipe);
1437 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1438 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1439
1440 if (wait_for(COND, 100))
Masanari Iida7e35ab82015-05-10 01:00:23 +09001441 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
Daniel Vetter9c065a72014-09-30 10:56:38 +02001442 state,
1443 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1444
1445#undef COND
1446
1447out:
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001448 mutex_unlock(&dev_priv->pcu_lock);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001449}
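
/*
 * Note on the Punit handshake used above, as implied by the register usage
 * in this file: the DP_SSC bits of PUNIT_REG_DSPFREQ carry the power state
 * request written by the driver, while the DP_SSS bits carry the status
 * reported back by the Punit, shifted up by 16 bits relative to the request.
 * chv_pipe_power_well_enabled() relies on the same relationship when it
 * warns on ctrl << 16 != state.
 */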
1450
Daniel Vetter9c065a72014-09-30 10:56:38 +02001451static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1452 struct i915_power_well *power_well)
1453{
Imre Deakf49193c2017-07-06 17:40:23 +03001454 WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001455
1456 chv_set_pipe_power_well(dev_priv, power_well, true);
Ville Syrjäläafd62752014-10-30 19:43:03 +02001457
Ville Syrjälä2be7d542015-06-29 15:25:51 +03001458 vlv_display_power_well_init(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001459}
1460
1461static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1462 struct i915_power_well *power_well)
1463{
Imre Deakf49193c2017-07-06 17:40:23 +03001464 WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001465
Ville Syrjälä2be7d542015-06-29 15:25:51 +03001466 vlv_display_power_well_deinit(dev_priv);
Ville Syrjäläafd62752014-10-30 19:43:03 +02001467
Daniel Vetter9c065a72014-09-30 10:56:38 +02001468 chv_set_pipe_power_well(dev_priv, power_well, false);
1469}
1470
Imre Deak09731282016-02-17 14:17:42 +02001471static void
1472__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1473 enum intel_display_power_domain domain)
1474{
1475 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1476 struct i915_power_well *power_well;
Imre Deak09731282016-02-17 14:17:42 +02001477
Imre Deak75ccb2e2017-02-17 17:39:43 +02001478 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
Imre Deakb409ca92016-06-13 16:44:33 +03001479 intel_power_well_get(dev_priv, power_well);
Imre Deak09731282016-02-17 14:17:42 +02001480
1481 power_domains->domain_use_count[domain]++;
1482}
1483
Daniel Vettere4e76842014-09-30 10:56:42 +02001484/**
1485 * intel_display_power_get - grab a power domain reference
1486 * @dev_priv: i915 device instance
1487 * @domain: power domain to reference
1488 *
1489 * This function grabs a power domain reference for @domain and ensures that the
1490 * power domain and all its parents are powered up. Therefore users should only
1491 * grab a reference to the innermost power domain they need.
1492 *
1493 * Any power domain reference obtained by this function must have a symmetric
1494 * call to intel_display_power_put() to release the reference again.
1495 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001496void intel_display_power_get(struct drm_i915_private *dev_priv,
1497 enum intel_display_power_domain domain)
1498{
Imre Deak09731282016-02-17 14:17:42 +02001499 struct i915_power_domains *power_domains = &dev_priv->power_domains;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001500
1501 intel_runtime_pm_get(dev_priv);
1502
Imre Deak09731282016-02-17 14:17:42 +02001503 mutex_lock(&power_domains->lock);
1504
1505 __intel_display_power_get_domain(dev_priv, domain);
1506
1507 mutex_unlock(&power_domains->lock);
1508}
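
/*
 * Illustrative usage sketch (the domain picked here is only an example, not
 * a recommendation for any particular code path):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access hardware that sits behind the PIPE_A power well ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 * Each get must be balanced by a put; the reference also holds a runtime PM
 * wakeref, so an unbalanced get keeps both the power well and the device
 * powered indefinitely.
 */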
1509
1510/**
1511 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1512 * @dev_priv: i915 device instance
1513 * @domain: power domain to reference
1514 *
 1515 * This function grabs a power domain reference for @domain only if the domain is
 1516 * already enabled (and the device is not runtime suspended), returning true in
 1517 * that case. Otherwise no reference is taken and false is returned.
1518 *
1519 * Any power domain reference obtained by this function must have a symmetric
1520 * call to intel_display_power_put() to release the reference again.
1521 */
1522bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1523 enum intel_display_power_domain domain)
1524{
1525 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1526 bool is_enabled;
1527
1528 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1529 return false;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001530
1531 mutex_lock(&power_domains->lock);
1532
Imre Deak09731282016-02-17 14:17:42 +02001533 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1534 __intel_display_power_get_domain(dev_priv, domain);
1535 is_enabled = true;
1536 } else {
1537 is_enabled = false;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001538 }
1539
Daniel Vetter9c065a72014-09-30 10:56:38 +02001540 mutex_unlock(&power_domains->lock);
Imre Deak09731282016-02-17 14:17:42 +02001541
1542 if (!is_enabled)
1543 intel_runtime_pm_put(dev_priv);
1544
1545 return is_enabled;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001546}
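
/*
 * Illustrative sketch of the conditional variant (the domain is again just an
 * example): callers that only want to read out state without powering
 * anything up use the _if_enabled form and bail out when it returns false:
 *
 *	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUX_B))
 *		return;
 *	... read hardware state ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
 */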
1547
Daniel Vettere4e76842014-09-30 10:56:42 +02001548/**
1549 * intel_display_power_put - release a power domain reference
1550 * @dev_priv: i915 device instance
 1551 * @domain: power domain to put the reference for
1552 *
1553 * This function drops the power domain reference obtained by
1554 * intel_display_power_get() and might power down the corresponding hardware
1555 * block right away if this is the last reference.
1556 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02001557void intel_display_power_put(struct drm_i915_private *dev_priv,
1558 enum intel_display_power_domain domain)
1559{
1560 struct i915_power_domains *power_domains;
1561 struct i915_power_well *power_well;
Daniel Vetter9c065a72014-09-30 10:56:38 +02001562
1563 power_domains = &dev_priv->power_domains;
1564
1565 mutex_lock(&power_domains->lock);
1566
Daniel Stone11c86db2015-11-20 15:55:34 +00001567 WARN(!power_domains->domain_use_count[domain],
1568 "Use count on domain %s is already zero\n",
1569 intel_display_power_domain_str(domain));
Daniel Vetter9c065a72014-09-30 10:56:38 +02001570 power_domains->domain_use_count[domain]--;
1571
Imre Deak75ccb2e2017-02-17 17:39:43 +02001572 for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
Imre Deakb409ca92016-06-13 16:44:33 +03001573 intel_power_well_put(dev_priv, power_well);
Daniel Vetter9c065a72014-09-30 10:56:38 +02001574
1575 mutex_unlock(&power_domains->lock);
1576
1577 intel_runtime_pm_put(dev_priv);
1578}
1579
Imre Deak965a79a2017-07-06 17:40:40 +03001580#define I830_PIPES_POWER_DOMAINS ( \
1581 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1582 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1583 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1584 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1585 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1586 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001587 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001588
Ville Syrjälä465ac0c2016-04-18 14:02:27 +03001589#define VLV_DISPLAY_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001590 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1591 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1592 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1593 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1594 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1595 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1596 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1597 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1598 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1599 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1600 BIT_ULL(POWER_DOMAIN_VGA) | \
1601 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1602 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1603 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1604 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1605 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001606
1607#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001608 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1609 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1610 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
1611 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1612 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1613 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001614
1615#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001616 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1617 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1618 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001619
1620#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001621 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1622 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1623 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001624
1625#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001626 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1627 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1628 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001629
1630#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001631 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1632 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1633 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001634
Ville Syrjälä465ac0c2016-04-18 14:02:27 +03001635#define CHV_DISPLAY_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001636 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
1637 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1638 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1639 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1640 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1641 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1642 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1643 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1644 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1645 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1646 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1647 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1648 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
1649 BIT_ULL(POWER_DOMAIN_VGA) | \
1650 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1651 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1652 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1653 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1654 BIT_ULL(POWER_DOMAIN_GMBUS) | \
1655 BIT_ULL(POWER_DOMAIN_INIT))
Ville Syrjälä465ac0c2016-04-18 14:02:27 +03001656
Daniel Vetter9c065a72014-09-30 10:56:38 +02001657#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001658 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1659 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1660 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1661 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1662 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001663
1664#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02001665 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1666 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1667 BIT_ULL(POWER_DOMAIN_INIT))
Daniel Vetter9c065a72014-09-30 10:56:38 +02001668
Imre Deak965a79a2017-07-06 17:40:40 +03001669#define HSW_DISPLAY_POWER_DOMAINS ( \
1670 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1671 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1672 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
1673 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1674 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1675 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1676 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1677 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1678 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1679 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1680 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1681 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1682 BIT_ULL(POWER_DOMAIN_VGA) | \
1683 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1684 BIT_ULL(POWER_DOMAIN_INIT))
1685
1686#define BDW_DISPLAY_POWER_DOMAINS ( \
1687 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1688 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1689 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1690 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1691 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1692 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1693 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1694 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1695 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1696 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1697 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
1698 BIT_ULL(POWER_DOMAIN_VGA) | \
1699 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1700 BIT_ULL(POWER_DOMAIN_INIT))
1701
1702#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1703 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1704 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1705 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1706 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1707 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1708 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1709 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1710 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1711 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1712 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
1713 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
1714 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1715 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1716 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1717 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1718 BIT_ULL(POWER_DOMAIN_VGA) | \
1719 BIT_ULL(POWER_DOMAIN_INIT))
1720#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
1721 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
1722 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
1723 BIT_ULL(POWER_DOMAIN_INIT))
1724#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
1725 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
1726 BIT_ULL(POWER_DOMAIN_INIT))
1727#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
1728 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
1729 BIT_ULL(POWER_DOMAIN_INIT))
1730#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
1731 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
1732 BIT_ULL(POWER_DOMAIN_INIT))
1733#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1734 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Tvrtko Ursulinb6876372017-12-05 13:28:54 +00001735 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001736 BIT_ULL(POWER_DOMAIN_MODESET) | \
1737 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1738 BIT_ULL(POWER_DOMAIN_INIT))
1739
1740#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1741 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1742 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1743 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1744 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1745 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1746 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1747 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1748 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1749 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1750 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1751 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1752 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1753 BIT_ULL(POWER_DOMAIN_VGA) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001754 BIT_ULL(POWER_DOMAIN_INIT))
1755#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1756 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Tvrtko Ursulinb6876372017-12-05 13:28:54 +00001757 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001758 BIT_ULL(POWER_DOMAIN_MODESET) | \
1759 BIT_ULL(POWER_DOMAIN_AUX_A) | \
Ville Syrjälä54c105d2017-12-08 23:37:37 +02001760 BIT_ULL(POWER_DOMAIN_GMBUS) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001761 BIT_ULL(POWER_DOMAIN_INIT))
1762#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
1763 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1764 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1765 BIT_ULL(POWER_DOMAIN_INIT))
1766#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
1767 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1768 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1769 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1770 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1771 BIT_ULL(POWER_DOMAIN_INIT))
1772
1773#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1774 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1775 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1776 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1777 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1778 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1779 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1780 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1781 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1782 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1783 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1784 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1785 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1786 BIT_ULL(POWER_DOMAIN_VGA) | \
1787 BIT_ULL(POWER_DOMAIN_INIT))
1788#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
1789 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
1790#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
1791 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
1792#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
1793 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
1794#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
1795 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
1796 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1797 BIT_ULL(POWER_DOMAIN_INIT))
1798#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
1799 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1800 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1801 BIT_ULL(POWER_DOMAIN_INIT))
1802#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
1803 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1804 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1805 BIT_ULL(POWER_DOMAIN_INIT))
1806#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
1807 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1808 BIT_ULL(POWER_DOMAIN_INIT))
1809#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
1810 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1811 BIT_ULL(POWER_DOMAIN_INIT))
1812#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
1813 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1814 BIT_ULL(POWER_DOMAIN_INIT))
1815#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1816 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Tvrtko Ursulinb6876372017-12-05 13:28:54 +00001817 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001818 BIT_ULL(POWER_DOMAIN_MODESET) | \
1819 BIT_ULL(POWER_DOMAIN_AUX_A) | \
Ville Syrjälä156961a2017-12-08 23:37:36 +02001820 BIT_ULL(POWER_DOMAIN_GMBUS) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001821 BIT_ULL(POWER_DOMAIN_INIT))
1822
1823#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
1824 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
1825 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
1826 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
1827 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
1828 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
1829 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
1830 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
1831 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
1832 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
1833 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
Rodrigo Vivi9787e832018-01-29 15:22:22 -08001834 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001835 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1836 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1837 BIT_ULL(POWER_DOMAIN_AUX_D) | \
Rodrigo Vivia324fca2018-01-29 15:22:15 -08001838 BIT_ULL(POWER_DOMAIN_AUX_F) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001839 BIT_ULL(POWER_DOMAIN_AUDIO) | \
1840 BIT_ULL(POWER_DOMAIN_VGA) | \
1841 BIT_ULL(POWER_DOMAIN_INIT))
1842#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
1843 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001844 BIT_ULL(POWER_DOMAIN_INIT))
1845#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
1846 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
1847 BIT_ULL(POWER_DOMAIN_INIT))
1848#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
1849 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
1850 BIT_ULL(POWER_DOMAIN_INIT))
1851#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
1852 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
1853 BIT_ULL(POWER_DOMAIN_INIT))
1854#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
1855 BIT_ULL(POWER_DOMAIN_AUX_A) | \
1856 BIT_ULL(POWER_DOMAIN_INIT))
1857#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
1858 BIT_ULL(POWER_DOMAIN_AUX_B) | \
1859 BIT_ULL(POWER_DOMAIN_INIT))
1860#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
1861 BIT_ULL(POWER_DOMAIN_AUX_C) | \
1862 BIT_ULL(POWER_DOMAIN_INIT))
1863#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
1864 BIT_ULL(POWER_DOMAIN_AUX_D) | \
1865 BIT_ULL(POWER_DOMAIN_INIT))
Rodrigo Vivia324fca2018-01-29 15:22:15 -08001866#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
1867 BIT_ULL(POWER_DOMAIN_AUX_F) | \
1868 BIT_ULL(POWER_DOMAIN_INIT))
Rodrigo Vivi9787e832018-01-29 15:22:22 -08001869#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
1870 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
1871 BIT_ULL(POWER_DOMAIN_INIT))
Imre Deak965a79a2017-07-06 17:40:40 +03001872#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
1873 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
Tvrtko Ursulin6e7a3f52018-01-11 08:24:17 +00001874 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
Imre Deak965a79a2017-07-06 17:40:40 +03001875 BIT_ULL(POWER_DOMAIN_MODESET) | \
1876 BIT_ULL(POWER_DOMAIN_AUX_A) | \
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03001877 BIT_ULL(POWER_DOMAIN_INIT))
1878
Daniel Vetter9c065a72014-09-30 10:56:38 +02001879static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001880 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001881 .enable = i9xx_always_on_power_well_noop,
1882 .disable = i9xx_always_on_power_well_noop,
1883 .is_enabled = i9xx_always_on_power_well_enabled,
1884};
1885
1886static const struct i915_power_well_ops chv_pipe_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001887 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001888 .enable = chv_pipe_power_well_enable,
1889 .disable = chv_pipe_power_well_disable,
1890 .is_enabled = chv_pipe_power_well_enabled,
1891};
1892
1893static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001894 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001895 .enable = chv_dpio_cmn_power_well_enable,
1896 .disable = chv_dpio_cmn_power_well_disable,
1897 .is_enabled = vlv_power_well_enabled,
1898};
1899
1900static struct i915_power_well i9xx_always_on_power_well[] = {
1901 {
1902 .name = "always-on",
1903 .always_on = 1,
1904 .domains = POWER_DOMAIN_MASK,
1905 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03001906 .id = I915_DISP_PW_ALWAYS_ON,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001907 },
1908};
1909
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03001910static const struct i915_power_well_ops i830_pipes_power_well_ops = {
1911 .sync_hw = i830_pipes_power_well_sync_hw,
1912 .enable = i830_pipes_power_well_enable,
1913 .disable = i830_pipes_power_well_disable,
1914 .is_enabled = i830_pipes_power_well_enabled,
1915};
1916
1917static struct i915_power_well i830_power_wells[] = {
1918 {
1919 .name = "always-on",
1920 .always_on = 1,
1921 .domains = POWER_DOMAIN_MASK,
1922 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03001923 .id = I915_DISP_PW_ALWAYS_ON,
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03001924 },
1925 {
1926 .name = "pipes",
1927 .domains = I830_PIPES_POWER_DOMAINS,
1928 .ops = &i830_pipes_power_well_ops,
Imre Deak120b56a2017-07-11 23:42:31 +03001929 .id = I830_DISP_PW_PIPES,
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03001930 },
1931};
1932
Daniel Vetter9c065a72014-09-30 10:56:38 +02001933static const struct i915_power_well_ops hsw_power_well_ops = {
1934 .sync_hw = hsw_power_well_sync_hw,
1935 .enable = hsw_power_well_enable,
1936 .disable = hsw_power_well_disable,
1937 .is_enabled = hsw_power_well_enabled,
1938};
1939
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01001940static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001941 .sync_hw = i9xx_power_well_sync_hw_noop,
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01001942 .enable = gen9_dc_off_power_well_enable,
1943 .disable = gen9_dc_off_power_well_disable,
1944 .is_enabled = gen9_dc_off_power_well_enabled,
1945};
1946
Imre Deak9c8d0b82016-06-13 16:44:34 +03001947static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001948 .sync_hw = i9xx_power_well_sync_hw_noop,
Imre Deak9c8d0b82016-06-13 16:44:34 +03001949 .enable = bxt_dpio_cmn_power_well_enable,
1950 .disable = bxt_dpio_cmn_power_well_disable,
1951 .is_enabled = bxt_dpio_cmn_power_well_enabled,
1952};
1953
Daniel Vetter9c065a72014-09-30 10:56:38 +02001954static struct i915_power_well hsw_power_wells[] = {
1955 {
1956 .name = "always-on",
1957 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03001958 .domains = POWER_DOMAIN_MASK,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001959 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03001960 .id = I915_DISP_PW_ALWAYS_ON,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001961 },
1962 {
1963 .name = "display",
1964 .domains = HSW_DISPLAY_POWER_DOMAINS,
1965 .ops = &hsw_power_well_ops,
Imre Deakfb9248e2017-07-11 23:42:32 +03001966 .id = HSW_DISP_PW_GLOBAL,
Imre Deak0a445942017-08-14 18:15:29 +03001967 {
1968 .hsw.has_vga = true,
1969 },
Daniel Vetter9c065a72014-09-30 10:56:38 +02001970 },
1971};
1972
1973static struct i915_power_well bdw_power_wells[] = {
1974 {
1975 .name = "always-on",
1976 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03001977 .domains = POWER_DOMAIN_MASK,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001978 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03001979 .id = I915_DISP_PW_ALWAYS_ON,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001980 },
1981 {
1982 .name = "display",
1983 .domains = BDW_DISPLAY_POWER_DOMAINS,
1984 .ops = &hsw_power_well_ops,
Imre Deakfb9248e2017-07-11 23:42:32 +03001985 .id = HSW_DISP_PW_GLOBAL,
Imre Deak0a445942017-08-14 18:15:29 +03001986 {
1987 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
1988 .hsw.has_vga = true,
1989 },
Daniel Vetter9c065a72014-09-30 10:56:38 +02001990 },
1991};
1992
1993static const struct i915_power_well_ops vlv_display_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02001994 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02001995 .enable = vlv_display_power_well_enable,
1996 .disable = vlv_display_power_well_disable,
1997 .is_enabled = vlv_power_well_enabled,
1998};
1999
2000static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02002001 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002002 .enable = vlv_dpio_cmn_power_well_enable,
2003 .disable = vlv_dpio_cmn_power_well_disable,
2004 .is_enabled = vlv_power_well_enabled,
2005};
2006
2007static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
Imre Deak3c1b38e2017-02-17 17:39:42 +02002008 .sync_hw = i9xx_power_well_sync_hw_noop,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002009 .enable = vlv_power_well_enable,
2010 .disable = vlv_power_well_disable,
2011 .is_enabled = vlv_power_well_enabled,
2012};
2013
2014static struct i915_power_well vlv_power_wells[] = {
2015 {
2016 .name = "always-on",
2017 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03002018 .domains = POWER_DOMAIN_MASK,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002019 .ops = &i9xx_always_on_power_well_ops,
Imre Deak438b8dc2017-07-11 23:42:30 +03002020 .id = I915_DISP_PW_ALWAYS_ON,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002021 },
2022 {
2023 .name = "display",
2024 .domains = VLV_DISPLAY_POWER_DOMAINS,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002025 .id = PUNIT_POWER_WELL_DISP2D,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002026 .ops = &vlv_display_power_well_ops,
2027 },
2028 {
2029 .name = "dpio-tx-b-01",
2030 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2031 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2032 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2033 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2034 .ops = &vlv_dpio_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002035 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002036 },
2037 {
2038 .name = "dpio-tx-b-23",
2039 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2040 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2041 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2042 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2043 .ops = &vlv_dpio_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002044 .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002045 },
2046 {
2047 .name = "dpio-tx-c-01",
2048 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2049 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2050 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2051 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2052 .ops = &vlv_dpio_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002053 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002054 },
2055 {
2056 .name = "dpio-tx-c-23",
2057 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2058 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2059 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2060 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2061 .ops = &vlv_dpio_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002062 .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002063 },
2064 {
2065 .name = "dpio-common",
2066 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002067 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002068 .ops = &vlv_dpio_cmn_power_well_ops,
2069 },
2070};
2071
2072static struct i915_power_well chv_power_wells[] = {
2073 {
2074 .name = "always-on",
2075 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03002076 .domains = POWER_DOMAIN_MASK,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002077 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03002078 .id = I915_DISP_PW_ALWAYS_ON,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002079 },
Daniel Vetter9c065a72014-09-30 10:56:38 +02002080 {
2081 .name = "display",
Ville Syrjäläbaa4e572014-10-27 16:07:32 +02002082 /*
Ville Syrjäläfde61e42015-05-26 20:22:39 +03002083 * Pipe A power well is the new disp2d well. Pipe B and C
2084 * power wells don't actually exist. Pipe A power well is
2085 * required for any pipe to work.
Ville Syrjäläbaa4e572014-10-27 16:07:32 +02002086 */
Ville Syrjälä465ac0c2016-04-18 14:02:27 +03002087 .domains = CHV_DISPLAY_POWER_DOMAINS,
Imre Deakf49193c2017-07-06 17:40:23 +03002088 .id = CHV_DISP_PW_PIPE_A,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002089 .ops = &chv_pipe_power_well_ops,
2090 },
Daniel Vetter9c065a72014-09-30 10:56:38 +02002091 {
2092 .name = "dpio-common-bc",
Ville Syrjälä71849b62015-04-10 18:21:29 +03002093 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002094 .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002095 .ops = &chv_dpio_cmn_power_well_ops,
2096 },
2097 {
2098 .name = "dpio-common-d",
Ville Syrjälä71849b62015-04-10 18:21:29 +03002099 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002100 .id = PUNIT_POWER_WELL_DPIO_CMN_D,
Daniel Vetter9c065a72014-09-30 10:56:38 +02002101 .ops = &chv_dpio_cmn_power_well_ops,
2102 },
Daniel Vetter9c065a72014-09-30 10:56:38 +02002103};
2104
Suketu Shah5aefb232015-04-16 14:22:10 +05302105bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
Imre Deak438b8dc2017-07-11 23:42:30 +03002106 enum i915_power_well_id power_well_id)
Suketu Shah5aefb232015-04-16 14:22:10 +05302107{
2108 struct i915_power_well *power_well;
2109 bool ret;
2110
2111 power_well = lookup_power_well(dev_priv, power_well_id);
2112 ret = power_well->ops->is_enabled(dev_priv, power_well);
2113
2114 return ret;
2115}
2116
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002117static struct i915_power_well skl_power_wells[] = {
2118 {
2119 .name = "always-on",
2120 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03002121 .domains = POWER_DOMAIN_MASK,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002122 .ops = &i9xx_always_on_power_well_ops,
Imre Deak438b8dc2017-07-11 23:42:30 +03002123 .id = I915_DISP_PW_ALWAYS_ON,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002124 },
2125 {
2126 .name = "power well 1",
Imre Deak4a76f292015-11-04 19:24:15 +02002127 /* Handled by the DMC firmware */
2128 .domains = 0,
Imre Deak4196b912017-07-11 23:42:36 +03002129 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002130 .id = SKL_DISP_PW_1,
Imre Deak0a445942017-08-14 18:15:29 +03002131 {
2132 .hsw.has_fuses = true,
2133 },
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002134 },
2135 {
2136 .name = "MISC IO power well",
Imre Deak4a76f292015-11-04 19:24:15 +02002137 /* Handled by the DMC firmware */
2138 .domains = 0,
Imre Deak4196b912017-07-11 23:42:36 +03002139 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002140 .id = SKL_DISP_PW_MISC_IO,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002141 },
2142 {
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01002143 .name = "DC off",
2144 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2145 .ops = &gen9_dc_off_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002146 .id = SKL_DISP_PW_DC_OFF,
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01002147 },
2148 {
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002149 .name = "power well 2",
2150 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002151 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002152 .id = SKL_DISP_PW_2,
Imre Deak0a445942017-08-14 18:15:29 +03002153 {
2154 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2155 .hsw.has_vga = true,
2156 .hsw.has_fuses = true,
2157 },
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002158 },
2159 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002160 .name = "DDI A/E IO power well",
2161 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002162 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002163 .id = SKL_DISP_PW_DDI_A_E,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002164 },
2165 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002166 .name = "DDI B IO power well",
2167 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002168 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002169 .id = SKL_DISP_PW_DDI_B,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002170 },
2171 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002172 .name = "DDI C IO power well",
2173 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002174 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002175 .id = SKL_DISP_PW_DDI_C,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002176 },
2177 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002178 .name = "DDI D IO power well",
2179 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002180 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002181 .id = SKL_DISP_PW_DDI_D,
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002182 },
2183};
2184
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302185static struct i915_power_well bxt_power_wells[] = {
2186 {
2187 .name = "always-on",
2188 .always_on = 1,
Ville Syrjälä998bd662016-04-18 14:02:26 +03002189 .domains = POWER_DOMAIN_MASK,
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302190 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03002191 .id = I915_DISP_PW_ALWAYS_ON,
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302192 },
2193 {
2194 .name = "power well 1",
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002195 .domains = 0,
Imre Deak4196b912017-07-11 23:42:36 +03002196 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002197 .id = SKL_DISP_PW_1,
Imre Deak0a445942017-08-14 18:15:29 +03002198 {
2199 .hsw.has_fuses = true,
2200 },
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302201 },
2202 {
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01002203 .name = "DC off",
2204 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2205 .ops = &gen9_dc_off_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002206 .id = SKL_DISP_PW_DC_OFF,
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01002207 },
2208 {
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302209 .name = "power well 2",
2210 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002211 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002212 .id = SKL_DISP_PW_2,
Imre Deak0a445942017-08-14 18:15:29 +03002213 {
2214 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2215 .hsw.has_vga = true,
2216 .hsw.has_fuses = true,
2217 },
Patrik Jakobsson9f836f92015-11-16 16:20:01 +01002218 },
Imre Deak9c8d0b82016-06-13 16:44:34 +03002219 {
2220 .name = "dpio-common-a",
2221 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2222 .ops = &bxt_dpio_cmn_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002223 .id = BXT_DPIO_CMN_A,
Imre Deak0a445942017-08-14 18:15:29 +03002224 {
2225 .bxt.phy = DPIO_PHY1,
2226 },
Imre Deak9c8d0b82016-06-13 16:44:34 +03002227 },
2228 {
2229 .name = "dpio-common-bc",
2230 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2231 .ops = &bxt_dpio_cmn_power_well_ops,
Ander Conselvan de Oliveira01c3faa2016-10-06 19:22:14 +03002232 .id = BXT_DPIO_CMN_BC,
Imre Deak0a445942017-08-14 18:15:29 +03002233 {
2234 .bxt.phy = DPIO_PHY0,
2235 },
Imre Deak9c8d0b82016-06-13 16:44:34 +03002236 },
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302237};
2238
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002239static struct i915_power_well glk_power_wells[] = {
2240 {
2241 .name = "always-on",
2242 .always_on = 1,
2243 .domains = POWER_DOMAIN_MASK,
2244 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03002245 .id = I915_DISP_PW_ALWAYS_ON,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002246 },
2247 {
2248 .name = "power well 1",
2249 /* Handled by the DMC firmware */
2250 .domains = 0,
Imre Deak4196b912017-07-11 23:42:36 +03002251 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002252 .id = SKL_DISP_PW_1,
Imre Deak0a445942017-08-14 18:15:29 +03002253 {
2254 .hsw.has_fuses = true,
2255 },
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002256 },
2257 {
2258 .name = "DC off",
2259 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2260 .ops = &gen9_dc_off_power_well_ops,
2261 .id = SKL_DISP_PW_DC_OFF,
2262 },
2263 {
2264 .name = "power well 2",
2265 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002266 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002267 .id = SKL_DISP_PW_2,
Imre Deak0a445942017-08-14 18:15:29 +03002268 {
2269 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2270 .hsw.has_vga = true,
2271 .hsw.has_fuses = true,
2272 },
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002273 },
2274 {
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +02002275 .name = "dpio-common-a",
2276 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2277 .ops = &bxt_dpio_cmn_power_well_ops,
2278 .id = BXT_DPIO_CMN_A,
Imre Deak0a445942017-08-14 18:15:29 +03002279 {
2280 .bxt.phy = DPIO_PHY1,
2281 },
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +02002282 },
2283 {
2284 .name = "dpio-common-b",
2285 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2286 .ops = &bxt_dpio_cmn_power_well_ops,
2287 .id = BXT_DPIO_CMN_BC,
Imre Deak0a445942017-08-14 18:15:29 +03002288 {
2289 .bxt.phy = DPIO_PHY0,
2290 },
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +02002291 },
2292 {
2293 .name = "dpio-common-c",
2294 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2295 .ops = &bxt_dpio_cmn_power_well_ops,
2296 .id = GLK_DPIO_CMN_C,
Imre Deak0a445942017-08-14 18:15:29 +03002297 {
2298 .bxt.phy = DPIO_PHY2,
2299 },
Ander Conselvan de Oliveira0a116ce2016-12-02 10:23:51 +02002300 },
2301 {
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002302 .name = "AUX A",
2303 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002304 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002305 .id = GLK_DISP_PW_AUX_A,
2306 },
2307 {
2308 .name = "AUX B",
2309 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002310 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002311 .id = GLK_DISP_PW_AUX_B,
2312 },
2313 {
2314 .name = "AUX C",
2315 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002316 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002317 .id = GLK_DISP_PW_AUX_C,
2318 },
2319 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002320 .name = "DDI A IO power well",
2321 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002322 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002323 .id = GLK_DISP_PW_DDI_A,
2324 },
2325 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002326 .name = "DDI B IO power well",
2327 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002328 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002329 .id = SKL_DISP_PW_DDI_B,
2330 },
2331 {
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +02002332 .name = "DDI C IO power well",
2333 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002334 .ops = &hsw_power_well_ops,
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002335 .id = SKL_DISP_PW_DDI_C,
2336 },
2337};
2338
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002339static struct i915_power_well cnl_power_wells[] = {
2340 {
2341 .name = "always-on",
2342 .always_on = 1,
2343 .domains = POWER_DOMAIN_MASK,
2344 .ops = &i9xx_always_on_power_well_ops,
Imre Deak029d80d2017-07-06 17:40:25 +03002345 .id = I915_DISP_PW_ALWAYS_ON,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002346 },
2347 {
2348 .name = "power well 1",
2349 /* Handled by the DMC firmware */
2350 .domains = 0,
Imre Deak4196b912017-07-11 23:42:36 +03002351 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002352 .id = SKL_DISP_PW_1,
Imre Deak0a445942017-08-14 18:15:29 +03002353 {
2354 .hsw.has_fuses = true,
2355 },
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002356 },
2357 {
2358 .name = "AUX A",
2359 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002360 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002361 .id = CNL_DISP_PW_AUX_A,
2362 },
2363 {
2364 .name = "AUX B",
2365 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002366 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002367 .id = CNL_DISP_PW_AUX_B,
2368 },
2369 {
2370 .name = "AUX C",
2371 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002372 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002373 .id = CNL_DISP_PW_AUX_C,
2374 },
2375 {
2376 .name = "AUX D",
2377 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002378 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002379 .id = CNL_DISP_PW_AUX_D,
2380 },
2381 {
2382 .name = "DC off",
2383 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2384 .ops = &gen9_dc_off_power_well_ops,
2385 .id = SKL_DISP_PW_DC_OFF,
2386 },
2387 {
2388 .name = "power well 2",
2389 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002390 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002391 .id = SKL_DISP_PW_2,
Imre Deak0a445942017-08-14 18:15:29 +03002392 {
2393 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2394 .hsw.has_vga = true,
2395 .hsw.has_fuses = true,
2396 },
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002397 },
2398 {
2399 .name = "DDI A IO power well",
2400 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002401 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002402 .id = CNL_DISP_PW_DDI_A,
2403 },
2404 {
2405 .name = "DDI B IO power well",
2406 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002407 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002408 .id = SKL_DISP_PW_DDI_B,
2409 },
2410 {
2411 .name = "DDI C IO power well",
2412 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002413 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002414 .id = SKL_DISP_PW_DDI_C,
2415 },
2416 {
2417 .name = "DDI D IO power well",
2418 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
Imre Deak4196b912017-07-11 23:42:36 +03002419 .ops = &hsw_power_well_ops,
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002420 .id = SKL_DISP_PW_DDI_D,
2421 },
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002422 {
Rodrigo Vivi9787e832018-01-29 15:22:22 -08002423 .name = "DDI F IO power well",
2424 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
2425 .ops = &hsw_power_well_ops,
2426 .id = CNL_DISP_PW_DDI_F,
2427 },
2428 {
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002429 .name = "AUX F",
2430 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
2431 .ops = &hsw_power_well_ops,
2432 .id = CNL_DISP_PW_AUX_F,
2433 },
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002434};
2435
Imre Deak1b0e3a02015-11-05 23:04:11 +02002436static int
2437sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2438 int disable_power_well)
2439{
2440 if (disable_power_well >= 0)
2441 return !!disable_power_well;
2442
Imre Deak1b0e3a02015-11-05 23:04:11 +02002443 return 1;
2444}
2445
Imre Deaka37baf32016-02-29 22:49:03 +02002446static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2447 int enable_dc)
2448{
2449 uint32_t mask;
2450 int requested_dc;
2451 int max_dc;
2452
Rodrigo Vivi6d6a8972017-07-06 13:45:08 -07002453 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
Imre Deaka37baf32016-02-29 22:49:03 +02002454 max_dc = 2;
2455 mask = 0;
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002456 } else if (IS_GEN9_LP(dev_priv)) {
Imre Deaka37baf32016-02-29 22:49:03 +02002457 max_dc = 1;
2458 /*
2459 * DC9 has a separate HW flow from the rest of the DC states,
2460 * not depending on the DMC firmware. It's needed by system
2461 * suspend/resume, so allow it unconditionally.
2462 */
2463 mask = DC_STATE_EN_DC9;
2464 } else {
2465 max_dc = 0;
2466 mask = 0;
2467 }
2468
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00002469 if (!i915_modparams.disable_power_well)
Imre Deak66e2c4c2016-02-29 22:49:04 +02002470 max_dc = 0;
2471
Imre Deaka37baf32016-02-29 22:49:03 +02002472 if (enable_dc >= 0 && enable_dc <= max_dc) {
2473 requested_dc = enable_dc;
2474 } else if (enable_dc == -1) {
2475 requested_dc = max_dc;
2476 } else if (enable_dc > max_dc && enable_dc <= 2) {
2477 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2478 enable_dc, max_dc);
2479 requested_dc = max_dc;
2480 } else {
2481 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2482 requested_dc = max_dc;
2483 }
2484
2485 if (requested_dc > 1)
2486 mask |= DC_STATE_EN_UPTO_DC6;
2487 if (requested_dc > 0)
2488 mask |= DC_STATE_EN_UPTO_DC5;
2489
2490 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2491
2492 return mask;
2493}
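
/*
 * Worked example of the mask computation above, purely for illustration: on a
 * GEN9_BC or CNL platform with power well support enabled
 * (i915.disable_power_well != 0) and i915.enable_dc=-1, max_dc is 2, so the
 * returned mask is DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6. On a GEN9_LP
 * platform with the same options but i915.enable_dc=1 the result is
 * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC5, since DC9 is always allowed there
 * and only one additional DC state was requested.
 */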
2494
Imre Deak21792c62017-07-11 23:42:33 +03002495static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2496{
2497 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2498 u64 power_well_ids;
2499 int i;
2500
2501 power_well_ids = 0;
2502 for (i = 0; i < power_domains->power_well_count; i++) {
2503 enum i915_power_well_id id = power_domains->power_wells[i].id;
2504
2505 WARN_ON(id >= sizeof(power_well_ids) * 8);
2506 WARN_ON(power_well_ids & BIT_ULL(id));
2507 power_well_ids |= BIT_ULL(id);
2508 }
2509}
2510
Daniel Vetter9c065a72014-09-30 10:56:38 +02002511#define set_power_wells(power_domains, __power_wells) ({ \
2512 (power_domains)->power_wells = (__power_wells); \
2513 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
2514})
2515
Daniel Vettere4e76842014-09-30 10:56:42 +02002516/**
2517 * intel_power_domains_init - initializes the power domain structures
2518 * @dev_priv: i915 device instance
2519 *
2520 * Initializes the power domain structures for @dev_priv depending upon the
2521 * supported platform.
2522 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02002523int intel_power_domains_init(struct drm_i915_private *dev_priv)
2524{
2525 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2526
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00002527 i915_modparams.disable_power_well =
2528 sanitize_disable_power_well_option(dev_priv,
2529 i915_modparams.disable_power_well);
2530 dev_priv->csr.allowed_dc_mask =
2531 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
Imre Deak1b0e3a02015-11-05 23:04:11 +02002532
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02002533 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
Ville Syrjäläf0ab43e2015-11-09 16:48:19 +01002534
Daniel Vetter9c065a72014-09-30 10:56:38 +02002535 mutex_init(&power_domains->lock);
2536
2537 /*
2538 * The enabling order will be from lower to higher indexed wells,
2539 * the disabling order is reversed.
2540 */
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002541 if (IS_HASWELL(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02002542 set_power_wells(power_domains, hsw_power_wells);
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002543 } else if (IS_BROADWELL(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02002544 set_power_wells(power_domains, bdw_power_wells);
Rodrigo Vivib976dc52017-01-23 10:32:37 -08002545 } else if (IS_GEN9_BC(dev_priv)) {
Satheeshakrishna M94dd5132015-02-04 13:57:44 +00002546 set_power_wells(power_domains, skl_power_wells);
Ville Syrjälä8bcd3dd2017-06-06 13:30:39 -07002547 } else if (IS_CANNONLAKE(dev_priv)) {
2548 set_power_wells(power_domains, cnl_power_wells);
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002549
2550 /*
Rodrigo Vivi9787e832018-01-29 15:22:22 -08002551	 * The DDI and AUX IO power wells are enabled for all ports,
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002552	 * regardless of whether the port is present or used. To avoid
Rodrigo Vivi9787e832018-01-29 15:22:22 -08002553	 * timeouts on the missing port, drop the two port F wells from
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002554	 * the list on SKUs without port F.
2555 */
2556 if (!IS_CNL_WITH_PORT_F(dev_priv))
Rodrigo Vivi9787e832018-01-29 15:22:22 -08002557 power_domains->power_well_count -= 2;
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002558
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002559 } else if (IS_BROXTON(dev_priv)) {
Satheeshakrishna M0b4a2a32014-07-11 14:51:13 +05302560 set_power_wells(power_domains, bxt_power_wells);
Ander Conselvan de Oliveira0d039262016-12-02 10:23:50 +02002561 } else if (IS_GEMINILAKE(dev_priv)) {
2562 set_power_wells(power_domains, glk_power_wells);
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002563 } else if (IS_CHERRYVIEW(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02002564 set_power_wells(power_domains, chv_power_wells);
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03002565 } else if (IS_VALLEYVIEW(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02002566 set_power_wells(power_domains, vlv_power_wells);
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03002567 } else if (IS_I830(dev_priv)) {
2568 set_power_wells(power_domains, i830_power_wells);
Daniel Vetter9c065a72014-09-30 10:56:38 +02002569 } else {
2570 set_power_wells(power_domains, i9xx_always_on_power_well);
2571 }
2572
Imre Deak21792c62017-07-11 23:42:33 +03002573 assert_power_well_ids_unique(dev_priv);
2574
Daniel Vetter9c065a72014-09-30 10:56:38 +02002575 return 0;
2576}
2577
Daniel Vettere4e76842014-09-30 10:56:42 +02002578/**
2579 * intel_power_domains_fini - finalizes the power domain structures
2580 * @dev_priv: i915 device instance
2581 *
2582 * Finalizes the power domain structures for @dev_priv depending upon the
2583 * supported platform. This function also disables runtime pm and ensures that
2584 * the device stays powered up so that the driver can be reloaded.
2585 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002586void intel_power_domains_fini(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02002587{
David Weinehallc49d13e2016-08-22 13:32:42 +03002588 struct device *kdev = &dev_priv->drm.pdev->dev;
Imre Deak25b181b2015-12-17 13:44:56 +02002589
Imre Deakaabee1b2015-12-15 20:10:29 +02002590 /*
2591 * The i915.ko module is still not prepared to be loaded when
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002592 * the power well is not enabled, so just enable it in case
Imre Deakaabee1b2015-12-15 20:10:29 +02002593 * we're going to unload/reload.
2594 * The following also reacquires the RPM reference the core passed
2595 * to the driver during loading, which is dropped in
2596 * intel_runtime_pm_enable(). We have to hand back the control of the
2597 * device to the core with this reference held.
2598 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002599 intel_display_set_init_power(dev_priv, true);
Imre Deakd314cd42015-11-17 17:44:23 +02002600
2601 /* Remove the refcount we took to keep power well support disabled. */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00002602 if (!i915_modparams.disable_power_well)
Imre Deakd314cd42015-11-17 17:44:23 +02002603 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Imre Deak25b181b2015-12-17 13:44:56 +02002604
2605 /*
2606 * Remove the refcount we took in intel_runtime_pm_enable() in case
2607 * the platform doesn't support runtime PM.
2608 */
2609 if (!HAS_RUNTIME_PM(dev_priv))
David Weinehallc49d13e2016-08-22 13:32:42 +03002610 pm_runtime_put(kdev);
Daniel Vetter9c065a72014-09-30 10:56:38 +02002611}
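/*
 * For orientation, a sketch of how these entry points pair up across driver
 * load and unload. The callers live in i915_drv.c; their exact names and
 * ordering are recalled from memory here, so treat this as illustrative:
 *
 *	intel_power_domains_init(dev_priv);            (driver load)
 *	intel_power_domains_init_hw(dev_priv, false);
 *	intel_runtime_pm_enable(dev_priv);
 *	...
 *	intel_power_domains_fini(dev_priv);            (driver unload)
 */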
2612
Imre Deak30eade12015-11-04 19:24:13 +02002613static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02002614{
2615 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2616 struct i915_power_well *power_well;
Daniel Vetter9c065a72014-09-30 10:56:38 +02002617
2618 mutex_lock(&power_domains->lock);
Imre Deak75ccb2e2017-02-17 17:39:43 +02002619 for_each_power_well(dev_priv, power_well) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02002620 power_well->ops->sync_hw(dev_priv, power_well);
2621 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2622 power_well);
2623 }
2624 mutex_unlock(&power_domains->lock);
2625}
2626
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002627static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2628{
2629 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2630 POSTING_READ(DBUF_CTL);
2631
2632 udelay(10);
2633
2634 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2635 DRM_ERROR("DBuf power enable timeout\n");
2636}
2637
2638static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2639{
2640 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2641 POSTING_READ(DBUF_CTL);
2642
2643 udelay(10);
2644
2645 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2646 DRM_ERROR("DBuf power disable timeout!\n");
2647}
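/*
 * The two helpers above implement a request/ack handshake: set the REQUEST
 * bit, wait a fixed 10 us, then sample the STATE bit. A minimal sketch of
 * the same handshake written with the generic polling helper instead of the
 * fixed delay, assuming intel_wait_for_register() is usable in this context:
 *
 *	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
 *	if (intel_wait_for_register(dev_priv, DBUF_CTL,
 *				    DBUF_POWER_STATE, DBUF_POWER_STATE, 1))
 *		DRM_ERROR("DBuf power enable timeout\n");
 *
 * The driver keeps the fixed-delay form above; this is only to illustrate
 * the equivalent polling pattern.
 */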
2648
Mahesh Kumar746edf82018-02-05 13:40:44 -02002649/*
2650 * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
2651 * needed and keep it disabled as much as possible.
2652 */
2653static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
2654{
2655 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
2656 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
2657 POSTING_READ(DBUF_CTL_S2);
2658
2659 udelay(10);
2660
2661 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2662 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
2663 DRM_ERROR("DBuf power enable timeout\n");
2664}
2665
2666static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
2667{
2668 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
2669 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
2670 POSTING_READ(DBUF_CTL_S2);
2671
2672 udelay(10);
2673
2674 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2675 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
2676 DRM_ERROR("DBuf power disable timeout!\n");
2677}
2678
Mahesh Kumar4cb45852018-02-05 13:40:45 -02002679static void icl_mbus_init(struct drm_i915_private *dev_priv)
2680{
2681 uint32_t val;
2682
2683 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
2684 MBUS_ABOX_BT_CREDIT_POOL2(16) |
2685 MBUS_ABOX_B_CREDIT(1) |
2686 MBUS_ABOX_BW_CREDIT(1);
2687
2688 I915_WRITE(MBUS_ABOX_CTL, val);
2689}
2690
Imre Deak73dfc222015-11-17 17:33:53 +02002691static void skl_display_core_init(struct drm_i915_private *dev_priv,
Imre Deak443a93a2016-04-04 15:42:57 +03002692 bool resume)
Imre Deak73dfc222015-11-17 17:33:53 +02002693{
2694 struct i915_power_domains *power_domains = &dev_priv->power_domains;
Imre Deak443a93a2016-04-04 15:42:57 +03002695 struct i915_power_well *well;
Imre Deak73dfc222015-11-17 17:33:53 +02002696 uint32_t val;
2697
Imre Deakd26fa1d2015-11-04 19:24:17 +02002698 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2699
Imre Deak73dfc222015-11-17 17:33:53 +02002700 /* enable PCH reset handshake */
2701 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2702 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2703
2704 /* enable PG1 and Misc I/O */
2705 mutex_lock(&power_domains->lock);
Imre Deak443a93a2016-04-04 15:42:57 +03002706
2707 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2708 intel_power_well_enable(dev_priv, well);
2709
2710 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2711 intel_power_well_enable(dev_priv, well);
2712
Imre Deak73dfc222015-11-17 17:33:53 +02002713 mutex_unlock(&power_domains->lock);
2714
Imre Deak73dfc222015-11-17 17:33:53 +02002715 skl_init_cdclk(dev_priv);
2716
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002717 gen9_dbuf_enable(dev_priv);
2718
Ville Syrjälä9f7eb312016-05-13 23:41:29 +03002719 if (resume && dev_priv->csr.dmc_payload)
Imre Deak2abc5252016-03-04 21:57:41 +02002720 intel_csr_load_program(dev_priv);
Imre Deak73dfc222015-11-17 17:33:53 +02002721}
2722
2723static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2724{
2725 struct i915_power_domains *power_domains = &dev_priv->power_domains;
Imre Deak443a93a2016-04-04 15:42:57 +03002726 struct i915_power_well *well;
Imre Deak73dfc222015-11-17 17:33:53 +02002727
Imre Deakd26fa1d2015-11-04 19:24:17 +02002728 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2729
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002730 gen9_dbuf_disable(dev_priv);
2731
Imre Deak73dfc222015-11-17 17:33:53 +02002732 skl_uninit_cdclk(dev_priv);
2733
2734 /* The spec doesn't call for removing the reset handshake flag */
2735 /* disable PG1 and Misc I/O */
Imre Deak443a93a2016-04-04 15:42:57 +03002736
Imre Deak73dfc222015-11-17 17:33:53 +02002737 mutex_lock(&power_domains->lock);
Imre Deak443a93a2016-04-04 15:42:57 +03002738
Imre Deakedfda8e2017-06-29 18:36:59 +03002739 /*
2740 * BSpec says to keep the MISC IO power well enabled here, only
2741 * remove our request for power well 1.
Imre Deak42d93662017-06-29 18:37:01 +03002742	 * Note that even though the driver's request is removed, power well 1
2743 * may stay enabled after this due to DMC's own request on it.
Imre Deakedfda8e2017-06-29 18:36:59 +03002744 */
Imre Deak443a93a2016-04-04 15:42:57 +03002745 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2746 intel_power_well_disable(dev_priv, well);
2747
Imre Deak73dfc222015-11-17 17:33:53 +02002748 mutex_unlock(&power_domains->lock);
Imre Deak846c6b22017-06-29 18:36:58 +03002749
2750 usleep_range(10, 30); /* 10 us delay per Bspec */
Imre Deak73dfc222015-11-17 17:33:53 +02002751}
2752
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002753void bxt_display_core_init(struct drm_i915_private *dev_priv,
2754 bool resume)
2755{
2756 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2757 struct i915_power_well *well;
2758 uint32_t val;
2759
2760 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2761
2762 /*
2763 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2764 * or else the reset will hang because there is no PCH to respond.
2765 * Move the handshake programming to initialization sequence.
2766 * Previously was left up to BIOS.
2767 */
2768 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2769 val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2770 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2771
2772 /* Enable PG1 */
2773 mutex_lock(&power_domains->lock);
2774
2775 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2776 intel_power_well_enable(dev_priv, well);
2777
2778 mutex_unlock(&power_domains->lock);
2779
Imre Deak324513c2016-06-13 16:44:36 +03002780 bxt_init_cdclk(dev_priv);
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002781
2782 gen9_dbuf_enable(dev_priv);
2783
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002784 if (resume && dev_priv->csr.dmc_payload)
2785 intel_csr_load_program(dev_priv);
2786}
2787
2788void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2789{
2790 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2791 struct i915_power_well *well;
2792
2793 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2794
Ville Syrjälä70c2c182016-05-13 23:41:30 +03002795 gen9_dbuf_disable(dev_priv);
2796
Imre Deak324513c2016-06-13 16:44:36 +03002797 bxt_uninit_cdclk(dev_priv);
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002798
2799 /* The spec doesn't call for removing the reset handshake flag */
2800
Imre Deak42d93662017-06-29 18:37:01 +03002801 /*
2802 * Disable PW1 (PG1).
 2803	 * Note that even though the driver's request is removed, power well 1
2804 * may stay enabled after this due to DMC's own request on it.
2805 */
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002806 mutex_lock(&power_domains->lock);
2807
2808 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2809 intel_power_well_disable(dev_priv, well);
2810
2811 mutex_unlock(&power_domains->lock);
Imre Deak846c6b22017-06-29 18:36:58 +03002812
2813 usleep_range(10, 30); /* 10 us delay per Bspec */
Imre Deakd7d7c9e2016-04-01 16:02:42 +03002814}
2815
Paulo Zanonie0b8acf2017-08-21 17:03:55 -07002816enum {
2817 PROCMON_0_85V_DOT_0,
2818 PROCMON_0_95V_DOT_0,
2819 PROCMON_0_95V_DOT_1,
2820 PROCMON_1_05V_DOT_0,
2821 PROCMON_1_05V_DOT_1,
2822};
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002823
2824static const struct cnl_procmon {
2825 u32 dw1, dw9, dw10;
Paulo Zanonie0b8acf2017-08-21 17:03:55 -07002826} cnl_procmon_values[] = {
2827 [PROCMON_0_85V_DOT_0] =
2828 { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
2829 [PROCMON_0_95V_DOT_0] =
2830 { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
2831 [PROCMON_0_95V_DOT_1] =
2832 { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
2833 [PROCMON_1_05V_DOT_0] =
2834 { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
2835 [PROCMON_1_05V_DOT_1] =
2836 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002837};
2838
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002839/*
2840 * CNL has just one set of registers, while ICL has two sets: one for port A and
2841 * the other for port B. The CNL registers are equivalent to the ICL port A
 2842 * registers, which is why we use the ICL macros even though the function has CNL
 2843 * in its name.
2844 */
2845static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
2846 enum port port)
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002847{
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002848 const struct cnl_procmon *procmon;
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002849 u32 val;
2850
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002851 val = I915_READ(ICL_PORT_COMP_DW3(port));
Paulo Zanonie0b8acf2017-08-21 17:03:55 -07002852 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
2853 default:
2854 MISSING_CASE(val);
2855 case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
2856 procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
2857 break;
2858 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
2859 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
2860 break;
2861 case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
2862 procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
2863 break;
2864 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
2865 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
2866 break;
2867 case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
2868 procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
2869 break;
2870 }
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002871
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002872 val = I915_READ(ICL_PORT_COMP_DW1(port));
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002873 val &= ~((0xff << 16) | 0xff);
2874 val |= procmon->dw1;
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002875 I915_WRITE(ICL_PORT_COMP_DW1(port), val);
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002876
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002877 I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
2878 I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
Paulo Zanoniade5ee72017-08-21 17:03:56 -07002879}
2880
2881static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
2882{
2883 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2884 struct i915_power_well *well;
2885 u32 val;
2886
2887 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2888
2889 /* 1. Enable PCH Reset Handshake */
2890 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2891 val |= RESET_PCH_HANDSHAKE_ENABLE;
2892 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2893
2894 /* 2. Enable Comp */
2895 val = I915_READ(CHICKEN_MISC_2);
2896 val &= ~CNL_COMP_PWR_DOWN;
2897 I915_WRITE(CHICKEN_MISC_2, val);
2898
Paulo Zanoni62d4a5e2018-02-05 13:40:41 -02002899 /* Dummy PORT_A to get the correct CNL register from the ICL macro */
2900 cnl_set_procmon_ref_values(dev_priv, PORT_A);
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002901
2902 val = I915_READ(CNL_PORT_COMP_DW0);
2903 val |= COMP_INIT;
2904 I915_WRITE(CNL_PORT_COMP_DW0, val);
2905
2906 /* 3. */
2907 val = I915_READ(CNL_PORT_CL1CM_DW5);
2908 val |= CL_POWER_DOWN_ENABLE;
2909 I915_WRITE(CNL_PORT_CL1CM_DW5, val);
2910
Imre Deakb38131f2017-06-29 18:37:02 +03002911 /*
2912 * 4. Enable Power Well 1 (PG1).
2913 * The AUX IO power wells will be enabled on demand.
2914 */
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002915 mutex_lock(&power_domains->lock);
2916 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2917 intel_power_well_enable(dev_priv, well);
2918 mutex_unlock(&power_domains->lock);
2919
2920 /* 5. Enable CD clock */
2921 cnl_init_cdclk(dev_priv);
2922
2923 /* 6. Enable DBUF */
2924 gen9_dbuf_enable(dev_priv);
Imre Deak57522c42017-10-03 12:51:58 +03002925
2926 if (resume && dev_priv->csr.dmc_payload)
2927 intel_csr_load_program(dev_priv);
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002928}
2929
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002930static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2931{
2932 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2933 struct i915_power_well *well;
2934 u32 val;
2935
2936 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2937
 2938	/* 1. Disable all display engine functions -> already done */
2939
2940 /* 2. Disable DBUF */
2941 gen9_dbuf_disable(dev_priv);
2942
2943 /* 3. Disable CD clock */
2944 cnl_uninit_cdclk(dev_priv);
2945
Imre Deakb38131f2017-06-29 18:37:02 +03002946 /*
2947 * 4. Disable Power Well 1 (PG1).
2948 * The AUX IO power wells are toggled on demand, so they are already
2949 * disabled at this point.
2950 */
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002951 mutex_lock(&power_domains->lock);
2952 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2953 intel_power_well_disable(dev_priv, well);
2954 mutex_unlock(&power_domains->lock);
2955
Imre Deak846c6b22017-06-29 18:36:58 +03002956 usleep_range(10, 30); /* 10 us delay per Bspec */
2957
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002958 /* 5. Disable Comp */
2959 val = I915_READ(CHICKEN_MISC_2);
Paulo Zanoni746a5172017-07-14 14:52:28 -03002960 val |= CNL_COMP_PWR_DOWN;
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07002961 I915_WRITE(CHICKEN_MISC_2, val);
2962}
2963
Paulo Zanoniad186f32018-02-05 13:40:43 -02002964static void icl_display_core_init(struct drm_i915_private *dev_priv,
2965 bool resume)
2966{
2967 enum port port;
2968 u32 val;
2969
2970 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2971
2972 /* 1. Enable PCH reset handshake. */
2973 val = I915_READ(HSW_NDE_RSTWRN_OPT);
2974 val |= RESET_PCH_HANDSHAKE_ENABLE;
2975 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2976
2977 for (port = PORT_A; port <= PORT_B; port++) {
2978 /* 2. Enable DDI combo PHY comp. */
2979 val = I915_READ(ICL_PHY_MISC(port));
2980 val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
2981 I915_WRITE(ICL_PHY_MISC(port), val);
2982
2983 cnl_set_procmon_ref_values(dev_priv, port);
2984
2985 val = I915_READ(ICL_PORT_COMP_DW0(port));
2986 val |= COMP_INIT;
2987 I915_WRITE(ICL_PORT_COMP_DW0(port), val);
2988
2989 /* 3. Set power down enable. */
2990 val = I915_READ(ICL_PORT_CL_DW5(port));
2991 val |= CL_POWER_DOWN_ENABLE;
2992 I915_WRITE(ICL_PORT_CL_DW5(port), val);
2993 }
2994
2995 /* 4. Enable power well 1 (PG1) and aux IO power. */
2996 /* FIXME: ICL power wells code not here yet. */
2997
2998 /* 5. Enable CDCLK. */
2999 icl_init_cdclk(dev_priv);
3000
3001 /* 6. Enable DBUF. */
Mahesh Kumar746edf82018-02-05 13:40:44 -02003002 icl_dbuf_enable(dev_priv);
Paulo Zanoniad186f32018-02-05 13:40:43 -02003003
3004 /* 7. Setup MBUS. */
Mahesh Kumar4cb45852018-02-05 13:40:45 -02003005 icl_mbus_init(dev_priv);
Paulo Zanoniad186f32018-02-05 13:40:43 -02003006
3007 /* 8. CHICKEN_DCPR_1 */
3008 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
3009 CNL_DDI_CLOCK_REG_ACCESS_ON);
3010}
3011
3012static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3013{
3014 enum port port;
3015 u32 val;
3016
3017 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3018
 3019	/* 1. Disable all display engine functions -> already done */
3020
3021 /* 2. Disable DBUF */
Mahesh Kumar746edf82018-02-05 13:40:44 -02003022 icl_dbuf_disable(dev_priv);
Paulo Zanoniad186f32018-02-05 13:40:43 -02003023
3024 /* 3. Disable CD clock */
3025 icl_uninit_cdclk(dev_priv);
3026
3027 /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
3028 /* FIXME: ICL power wells code not here yet. */
3029
3030 /* 5. Disable Comp */
3031 for (port = PORT_A; port <= PORT_B; port++) {
3032 val = I915_READ(ICL_PHY_MISC(port));
3033 val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3034 I915_WRITE(ICL_PHY_MISC(port), val);
3035 }
3036}
3037
Ville Syrjälä70722462015-04-10 18:21:28 +03003038static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3039{
3040 struct i915_power_well *cmn_bc =
3041 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3042 struct i915_power_well *cmn_d =
3043 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
3044
3045 /*
3046 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
 3047	 * workaround, never ever read DISPLAY_PHY_CONTROL, and
3048 * instead maintain a shadow copy ourselves. Use the actual
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003049 * power well state and lane status to reconstruct the
3050 * expected initial value.
Ville Syrjälä70722462015-04-10 18:21:28 +03003051 */
3052 dev_priv->chv_phy_control =
Ville Syrjäläbc284542015-05-26 20:22:38 +03003053 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3054 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003055 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3056 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3057 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3058
3059 /*
3060 * If all lanes are disabled we leave the override disabled
3061 * with all power down bits cleared to match the state we
3062 * would use after disabling the port. Otherwise enable the
 3063	 * override and set the lane powerdown bits according to the
3064 * current lane status.
3065 */
3066 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
3067 uint32_t status = I915_READ(DPLL(PIPE_A));
3068 unsigned int mask;
3069
3070 mask = status & DPLL_PORTB_READY_MASK;
3071 if (mask == 0xf)
3072 mask = 0x0;
3073 else
3074 dev_priv->chv_phy_control |=
3075 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3076
3077 dev_priv->chv_phy_control |=
3078 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3079
3080 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3081 if (mask == 0xf)
3082 mask = 0x0;
3083 else
3084 dev_priv->chv_phy_control |=
3085 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3086
3087 dev_priv->chv_phy_control |=
3088 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3089
Ville Syrjälä70722462015-04-10 18:21:28 +03003090 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
Ville Syrjälä3be60de2015-09-08 18:05:45 +03003091
3092 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3093 } else {
3094 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003095 }
3096
3097 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
3098 uint32_t status = I915_READ(DPIO_PHY_STATUS);
3099 unsigned int mask;
3100
3101 mask = status & DPLL_PORTD_READY_MASK;
3102
3103 if (mask == 0xf)
3104 mask = 0x0;
3105 else
3106 dev_priv->chv_phy_control |=
3107 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3108
3109 dev_priv->chv_phy_control |=
3110 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3111
Ville Syrjälä70722462015-04-10 18:21:28 +03003112 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
Ville Syrjälä3be60de2015-09-08 18:05:45 +03003113
3114 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3115 } else {
3116 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003117 }
3118
3119 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3120
3121 DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3122 dev_priv->chv_phy_control);
Ville Syrjälä70722462015-04-10 18:21:28 +03003123}
3124
Daniel Vetter9c065a72014-09-30 10:56:38 +02003125static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3126{
3127 struct i915_power_well *cmn =
3128 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3129 struct i915_power_well *disp2d =
3130 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
3131
Daniel Vetter9c065a72014-09-30 10:56:38 +02003132 /* If the display might be already active skip this */
Ville Syrjälä5d93a6e2014-10-16 20:52:33 +03003133 if (cmn->ops->is_enabled(dev_priv, cmn) &&
3134 disp2d->ops->is_enabled(dev_priv, disp2d) &&
Daniel Vetter9c065a72014-09-30 10:56:38 +02003135 I915_READ(DPIO_CTL) & DPIO_CMNRST)
3136 return;
3137
3138 DRM_DEBUG_KMS("toggling display PHY side reset\n");
3139
3140 /* cmnlane needs DPLL registers */
3141 disp2d->ops->enable(dev_priv, disp2d);
3142
3143 /*
3144 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3145 * Need to assert and de-assert PHY SB reset by gating the
3146 * common lane power, then un-gating it.
3147 * Simply ungating isn't enough to reset the PHY enough to get
3148 * ports and lanes running.
3149 */
3150 cmn->ops->disable(dev_priv, cmn);
3151}
3152
Daniel Vettere4e76842014-09-30 10:56:42 +02003153/**
3154 * intel_power_domains_init_hw - initialize hardware power domain state
3155 * @dev_priv: i915 device instance
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003156 * @resume: true if called from a resume code path, false otherwise
Daniel Vettere4e76842014-09-30 10:56:42 +02003157 *
3158 * This function initializes the hardware power domain state and enables all
Imre Deak8d8c3862017-02-17 17:39:46 +02003159 * power wells belonging to the INIT power domain. Power wells in other
3160 * domains (and not in the INIT domain) are referenced or disabled during the
3161 * modeset state HW readout. After that the reference count of each power well
3162 * must match its HW enabled state, see intel_power_domains_verify_state().
Daniel Vettere4e76842014-09-30 10:56:42 +02003163 */
Imre Deak73dfc222015-11-17 17:33:53 +02003164void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
Daniel Vetter9c065a72014-09-30 10:56:38 +02003165{
Daniel Vetter9c065a72014-09-30 10:56:38 +02003166 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3167
3168 power_domains->initializing = true;
3169
Paulo Zanoniad186f32018-02-05 13:40:43 -02003170 if (IS_ICELAKE(dev_priv)) {
3171 icl_display_core_init(dev_priv, resume);
3172 } else if (IS_CANNONLAKE(dev_priv)) {
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07003173 cnl_display_core_init(dev_priv, resume);
3174 } else if (IS_GEN9_BC(dev_priv)) {
Imre Deak73dfc222015-11-17 17:33:53 +02003175 skl_display_core_init(dev_priv, resume);
Ander Conselvan de Oliveirab817c442016-12-02 10:23:56 +02003176 } else if (IS_GEN9_LP(dev_priv)) {
Imre Deakd7d7c9e2016-04-01 16:02:42 +03003177 bxt_display_core_init(dev_priv, resume);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01003178 } else if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä770effb2015-07-08 23:45:51 +03003179 mutex_lock(&power_domains->lock);
Ville Syrjälä70722462015-04-10 18:21:28 +03003180 chv_phy_control_init(dev_priv);
Ville Syrjälä770effb2015-07-08 23:45:51 +03003181 mutex_unlock(&power_domains->lock);
Tvrtko Ursulin11a914c2016-10-13 11:03:08 +01003182 } else if (IS_VALLEYVIEW(dev_priv)) {
Daniel Vetter9c065a72014-09-30 10:56:38 +02003183 mutex_lock(&power_domains->lock);
3184 vlv_cmnlane_wa(dev_priv);
3185 mutex_unlock(&power_domains->lock);
3186 }
3187
3188 /* For now, we need the power well to be always enabled. */
3189 intel_display_set_init_power(dev_priv, true);
Imre Deakd314cd42015-11-17 17:44:23 +02003190 /* Disable power support if the user asked so. */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00003191 if (!i915_modparams.disable_power_well)
Imre Deakd314cd42015-11-17 17:44:23 +02003192 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Imre Deak30eade12015-11-04 19:24:13 +02003193 intel_power_domains_sync_hw(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003194 power_domains->initializing = false;
3195}
3196
Daniel Vettere4e76842014-09-30 10:56:42 +02003197/**
Imre Deak73dfc222015-11-17 17:33:53 +02003198 * intel_power_domains_suspend - suspend power domain state
3199 * @dev_priv: i915 device instance
3200 *
3201 * This function prepares the hardware power domain state before entering
3202 * system suspend. It must be paired with intel_power_domains_init_hw().
3203 */
3204void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3205{
Imre Deakd314cd42015-11-17 17:44:23 +02003206 /*
3207 * Even if power well support was disabled we still want to disable
3208 * power wells while we are system suspended.
3209 */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00003210 if (!i915_modparams.disable_power_well)
Imre Deakd314cd42015-11-17 17:44:23 +02003211 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Imre Deak2622d792016-02-29 22:49:02 +02003212
Paulo Zanoniad186f32018-02-05 13:40:43 -02003213 if (IS_ICELAKE(dev_priv))
3214 icl_display_core_uninit(dev_priv);
3215 else if (IS_CANNONLAKE(dev_priv))
Ville Syrjäläd8d4a512017-06-09 15:26:00 -07003216 cnl_display_core_uninit(dev_priv);
3217 else if (IS_GEN9_BC(dev_priv))
Imre Deak2622d792016-02-29 22:49:02 +02003218 skl_display_core_uninit(dev_priv);
Ander Conselvan de Oliveirab817c442016-12-02 10:23:56 +02003219 else if (IS_GEN9_LP(dev_priv))
Imre Deakd7d7c9e2016-04-01 16:02:42 +03003220 bxt_display_core_uninit(dev_priv);
Imre Deak73dfc222015-11-17 17:33:53 +02003221}
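/*
 * Sketch of how this pairs with resume at the same level (the actual
 * suspend/resume hooks live elsewhere; this is illustrative only):
 *
 *	intel_power_domains_suspend(dev_priv);         (entering suspend)
 *	...
 *	intel_power_domains_init_hw(dev_priv, true);   (on resume)
 *
 * Passing resume=true lets the per-platform display core init paths reload
 * the DMC firmware via intel_csr_load_program() when a payload is present.
 */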
3222
Imre Deak8d8c3862017-02-17 17:39:46 +02003223static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3224{
3225 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3226 struct i915_power_well *power_well;
3227
3228 for_each_power_well(dev_priv, power_well) {
3229 enum intel_display_power_domain domain;
3230
3231 DRM_DEBUG_DRIVER("%-25s %d\n",
3232 power_well->name, power_well->count);
3233
3234 for_each_power_domain(domain, power_well->domains)
3235 DRM_DEBUG_DRIVER(" %-23s %d\n",
3236 intel_display_power_domain_str(domain),
3237 power_domains->domain_use_count[domain]);
3238 }
3239}
3240
3241/**
3242 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3243 * @dev_priv: i915 device instance
3244 *
3245 * Verify if the reference count of each power well matches its HW enabled
3246 * state and the total refcount of the domains it belongs to. This must be
3247 * called after modeset HW state sanitization, which is responsible for
3248 * acquiring reference counts for any power wells in use and disabling the
3249 * ones left on by BIOS but not required by any active output.
3250 */
3251void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3252{
3253 struct i915_power_domains *power_domains = &dev_priv->power_domains;
3254 struct i915_power_well *power_well;
3255 bool dump_domain_info;
3256
3257 mutex_lock(&power_domains->lock);
3258
3259 dump_domain_info = false;
3260 for_each_power_well(dev_priv, power_well) {
3261 enum intel_display_power_domain domain;
3262 int domains_count;
3263 bool enabled;
3264
3265 /*
3266 * Power wells not belonging to any domain (like the MISC_IO
3267 * and PW1 power wells) are under FW control, so ignore them,
3268 * since their state can change asynchronously.
3269 */
3270 if (!power_well->domains)
3271 continue;
3272
3273 enabled = power_well->ops->is_enabled(dev_priv, power_well);
3274 if ((power_well->count || power_well->always_on) != enabled)
3275 DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3276 power_well->name, power_well->count, enabled);
3277
3278 domains_count = 0;
3279 for_each_power_domain(domain, power_well->domains)
3280 domains_count += power_domains->domain_use_count[domain];
3281
3282 if (power_well->count != domains_count) {
3283 DRM_ERROR("power well %s refcount/domain refcount mismatch "
3284 "(refcount %d/domains refcount %d)\n",
3285 power_well->name, power_well->count,
3286 domains_count);
3287 dump_domain_info = true;
3288 }
3289 }
3290
3291 if (dump_domain_info) {
3292 static bool dumped;
3293
3294 if (!dumped) {
3295 intel_power_domains_dump_info(dev_priv);
3296 dumped = true;
3297 }
3298 }
3299
3300 mutex_unlock(&power_domains->lock);
3301}
3302
Imre Deak73dfc222015-11-17 17:33:53 +02003303/**
Daniel Vettere4e76842014-09-30 10:56:42 +02003304 * intel_runtime_pm_get - grab a runtime pm reference
3305 * @dev_priv: i915 device instance
3306 *
3307 * This function grabs a device-level runtime pm reference (mostly used for GEM
3308 * code to ensure the GTT or GT is on) and ensures that it is powered up.
3309 *
3310 * Any runtime pm reference obtained by this function must have a symmetric
3311 * call to intel_runtime_pm_put() to release the reference again.
3312 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003313void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3314{
David Weinehall52a05c32016-08-22 13:32:44 +03003315 struct pci_dev *pdev = dev_priv->drm.pdev;
3316 struct device *kdev = &pdev->dev;
Imre Deakf5073822017-03-28 12:38:55 +03003317 int ret;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003318
Imre Deakf5073822017-03-28 12:38:55 +03003319 ret = pm_runtime_get_sync(kdev);
3320 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
Imre Deak1f814da2015-12-16 02:52:19 +02003321
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01003322 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
Imre Deakc9b88462015-12-15 20:10:34 +02003323 assert_rpm_wakelock_held(dev_priv);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003324}
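/*
 * Typical usage sketch (illustrative only): bracket hardware access with a
 * get/put pair so the device is guaranteed to stay powered in between.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	...MMIO or other accesses that need the device awake...
 *	intel_runtime_pm_put(dev_priv);
 */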
3325
Daniel Vettere4e76842014-09-30 10:56:42 +02003326/**
Imre Deak09731282016-02-17 14:17:42 +02003327 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3328 * @dev_priv: i915 device instance
3329 *
3330 * This function grabs a device-level runtime pm reference if the device is
3331 * already in use and ensures that it is powered up.
3332 *
3333 * Any runtime pm reference obtained by this function must have a symmetric
3334 * call to intel_runtime_pm_put() to release the reference again.
3335 */
3336bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3337{
David Weinehall52a05c32016-08-22 13:32:44 +03003338 struct pci_dev *pdev = dev_priv->drm.pdev;
3339 struct device *kdev = &pdev->dev;
Imre Deak09731282016-02-17 14:17:42 +02003340
Chris Wilson135dc792016-02-25 21:10:28 +00003341 if (IS_ENABLED(CONFIG_PM)) {
David Weinehallc49d13e2016-08-22 13:32:42 +03003342 int ret = pm_runtime_get_if_in_use(kdev);
Imre Deak09731282016-02-17 14:17:42 +02003343
Chris Wilson135dc792016-02-25 21:10:28 +00003344 /*
 3345		 * In cases where runtime PM is disabled by the RPM core and we get
 3346		 * an -EINVAL return value, we are not supposed to call this
 3347		 * function, since the power state is undefined. This currently
 3348		 * applies to the late/early system suspend/resume handlers.
3349 */
Imre Deakf5073822017-03-28 12:38:55 +03003350 WARN_ONCE(ret < 0,
3351 "pm_runtime_get_if_in_use() failed: %d\n", ret);
Chris Wilson135dc792016-02-25 21:10:28 +00003352 if (ret <= 0)
3353 return false;
3354 }
Imre Deak09731282016-02-17 14:17:42 +02003355
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01003356 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
Imre Deak09731282016-02-17 14:17:42 +02003357 assert_rpm_wakelock_held(dev_priv);
3358
3359 return true;
3360}
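/*
 * Illustrative pattern: do opportunistic work only while the device is
 * already awake, instead of waking it up just for this.
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		...optional hardware access...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */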
3361
3362/**
Daniel Vettere4e76842014-09-30 10:56:42 +02003363 * intel_runtime_pm_get_noresume - grab a runtime pm reference
3364 * @dev_priv: i915 device instance
3365 *
3366 * This function grabs a device-level runtime pm reference (mostly used for GEM
3367 * code to ensure the GTT or GT is on).
3368 *
3369 * It will _not_ power up the device but instead only check that it's powered
 3370 * on. Therefore it is only valid to call this function from contexts where
3371 * the device is known to be powered up and where trying to power it up would
3372 * result in hilarity and deadlocks. That pretty much means only the system
3373 * suspend/resume code where this is used to grab runtime pm references for
3374 * delayed setup down in work items.
3375 *
3376 * Any runtime pm reference obtained by this function must have a symmetric
3377 * call to intel_runtime_pm_put() to release the reference again.
3378 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003379void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3380{
David Weinehall52a05c32016-08-22 13:32:44 +03003381 struct pci_dev *pdev = dev_priv->drm.pdev;
3382 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003383
Imre Deakc9b88462015-12-15 20:10:34 +02003384 assert_rpm_wakelock_held(dev_priv);
David Weinehallc49d13e2016-08-22 13:32:42 +03003385 pm_runtime_get_noresume(kdev);
Imre Deak1f814da2015-12-16 02:52:19 +02003386
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01003387 atomic_inc(&dev_priv->runtime_pm.wakeref_count);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003388}
3389
Daniel Vettere4e76842014-09-30 10:56:42 +02003390/**
3391 * intel_runtime_pm_put - release a runtime pm reference
3392 * @dev_priv: i915 device instance
3393 *
3394 * This function drops the device-level runtime pm reference obtained by
3395 * intel_runtime_pm_get() and might power down the corresponding
3396 * hardware block right away if this is the last reference.
3397 */
Daniel Vetter9c065a72014-09-30 10:56:38 +02003398void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3399{
David Weinehall52a05c32016-08-22 13:32:44 +03003400 struct pci_dev *pdev = dev_priv->drm.pdev;
3401 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003402
Imre Deak542db3c2015-12-15 20:10:36 +02003403 assert_rpm_wakelock_held(dev_priv);
Sagar Arun Kamblead1443f2017-10-10 22:30:04 +01003404 atomic_dec(&dev_priv->runtime_pm.wakeref_count);
Imre Deak1f814da2015-12-16 02:52:19 +02003405
David Weinehallc49d13e2016-08-22 13:32:42 +03003406 pm_runtime_mark_last_busy(kdev);
3407 pm_runtime_put_autosuspend(kdev);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003408}
3409
Daniel Vettere4e76842014-09-30 10:56:42 +02003410/**
3411 * intel_runtime_pm_enable - enable runtime pm
3412 * @dev_priv: i915 device instance
3413 *
3414 * This function enables runtime pm at the end of the driver load sequence.
3415 *
 3416 * Note that this function does not currently enable runtime pm for the
3417 * subordinate display power domains. That is only done on the first modeset
3418 * using intel_display_set_init_power().
3419 */
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003420void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
Daniel Vetter9c065a72014-09-30 10:56:38 +02003421{
David Weinehall52a05c32016-08-22 13:32:44 +03003422 struct pci_dev *pdev = dev_priv->drm.pdev;
David Weinehall52a05c32016-08-22 13:32:44 +03003423 struct device *kdev = &pdev->dev;
Daniel Vetter9c065a72014-09-30 10:56:38 +02003424
David Weinehallc49d13e2016-08-22 13:32:42 +03003425 pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3426 pm_runtime_mark_last_busy(kdev);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003427
Imre Deak25b181b2015-12-17 13:44:56 +02003428 /*
3429 * Take a permanent reference to disable the RPM functionality and drop
3430 * it only when unloading the driver. Use the low level get/put helpers,
3431 * so the driver's own RPM reference tracking asserts also work on
3432 * platforms without RPM support.
3433 */
Tvrtko Ursulin6772ffe2016-10-13 11:02:55 +01003434 if (!HAS_RUNTIME_PM(dev_priv)) {
Imre Deakf5073822017-03-28 12:38:55 +03003435 int ret;
3436
David Weinehallc49d13e2016-08-22 13:32:42 +03003437 pm_runtime_dont_use_autosuspend(kdev);
Imre Deakf5073822017-03-28 12:38:55 +03003438 ret = pm_runtime_get_sync(kdev);
3439 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003440 } else {
David Weinehallc49d13e2016-08-22 13:32:42 +03003441 pm_runtime_use_autosuspend(kdev);
Imre Deakcbc68dc2015-12-17 19:04:33 +02003442 }
Daniel Vetter9c065a72014-09-30 10:56:38 +02003443
Imre Deakaabee1b2015-12-15 20:10:29 +02003444 /*
3445 * The core calls the driver load handler with an RPM reference held.
3446 * We drop that here and will reacquire it during unloading in
3447 * intel_power_domains_fini().
3448 */
David Weinehallc49d13e2016-08-22 13:32:42 +03003449 pm_runtime_put_autosuspend(kdev);
Daniel Vetter9c065a72014-09-30 10:56:38 +02003450}