blob: b7cc6dd45c9e25498bb410299695717738c919a3 [file] [log] [blame]
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
Rodrigo Vivib2b89f52014-11-14 08:52:29 -080024/**
25 * DOC: Panel Self Refresh (PSR/SRD)
26 *
 * 27 * Since Haswell, the display controller supports Panel Self-Refresh on
 * 28 * display panels which have a remote frame buffer (RFB) implemented according to PSR
29 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
30 * when system is idle but display is on as it eliminates display refresh
31 * request to DDR memory completely as long as the frame buffer for that
32 * display is unchanged.
33 *
34 * Panel Self Refresh must be supported by both Hardware (source) and
35 * Panel (sink).
36 *
37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38 * to power down the link and memory controller. For DSI panels the same idea
39 * is called "manual mode".
40 *
41 * The implementation uses the hardware-based PSR support which automatically
42 * enters/exits self-refresh mode. The hardware takes care of sending the
43 * required DP aux message and could even retrain the link (that part isn't
44 * enabled yet though). The hardware also keeps track of any frontbuffer
45 * changes to know when to exit self-refresh mode again. Unfortunately that
46 * part doesn't work too well, hence why the i915 PSR support uses the
47 * software frontbuffer tracking to make sure it doesn't miss a screen
48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49 * get called by the frontbuffer tracking code. Note that because of locking
50 * issues the self-refresh re-enable code is done from a work queue, which
 * 51 * must be correctly synchronized/cancelled when shutting down the pipe.
52 */
53
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -080054#include <drm/drmP.h>
55
56#include "intel_drv.h"
57#include "i915_drv.h"
58
Dhinakaran Pandiyanb891d5e2018-02-23 14:15:15 -080059static inline enum intel_display_power_domain
60psr_aux_domain(struct intel_dp *intel_dp)
61{
62 /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
63 * However, for non-A AUX ports the corresponding non-EDP transcoders
64 * would have already enabled power well 2 and DC_OFF. This means we can
65 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
66 * specific AUX_IO reference without powering up any extra wells.
67 * Note that PSR is enabled only on Port A even though this function
68 * returns the correct domain for other ports too.
69 */
70 return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
71 intel_dp->aux_power_domain;
72}
73
74static void psr_aux_io_power_get(struct intel_dp *intel_dp)
75{
76 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
77 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
78
79 if (INTEL_GEN(dev_priv) < 10)
80 return;
81
82 intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
83}
84
85static void psr_aux_io_power_put(struct intel_dp *intel_dp)
86{
87 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
88 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
89
90 if (INTEL_GEN(dev_priv) < 10)
91 return;
92
93 intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
94}
95
Dhinakaran Pandiyan77fe36f2018-02-23 14:15:17 -080096static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
97{
98 uint8_t psr_caps = 0;
99
100 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
101 return false;
102 return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
103}
104
105static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
106{
107 uint8_t dprx = 0;
108
109 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
110 &dprx) != 1)
111 return false;
112 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
113}
114
115static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
116{
117 uint8_t alpm_caps = 0;
118
119 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
120 &alpm_caps) != 1)
121 return false;
122 return alpm_caps & DP_ALPM_CAP;
123}
124
125void intel_psr_init_dpcd(struct intel_dp *intel_dp)
126{
127 struct drm_i915_private *dev_priv =
128 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
129
130 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
131 sizeof(intel_dp->psr_dpcd));
132
133 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
134 dev_priv->psr.sink_support = true;
135 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
136 }
137
138 if (INTEL_GEN(dev_priv) >= 9 &&
139 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
140 uint8_t frame_sync_cap;
141
142 dev_priv->psr.sink_support = true;
143 if (drm_dp_dpcd_readb(&intel_dp->aux,
144 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
145 &frame_sync_cap) != 1)
146 frame_sync_cap = 0;
Dhinakaran Pandiyane2770e22018-02-23 14:15:18 -0800147 dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
Dhinakaran Pandiyan77fe36f2018-02-23 14:15:17 -0800148 /* PSR2 needs frame sync as well */
149 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
150 DRM_DEBUG_KMS("PSR2 %s on sink",
151 dev_priv->psr.psr2_support ? "supported" : "not supported");
152
153 if (dev_priv->psr.psr2_support) {
154 dev_priv->psr.y_cord_support =
155 intel_dp_get_y_cord_status(intel_dp);
156 dev_priv->psr.colorimetry_support =
157 intel_dp_get_colorimetry_status(intel_dp);
158 dev_priv->psr.alpm =
159 intel_dp_get_alpm_status(intel_dp);
160 }
161 }
162}
163
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800164static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
165{
Chris Wilsonfac5e232016-07-04 11:34:36 +0100166 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800167 uint32_t val;
168
169 val = I915_READ(VLV_PSRSTAT(pipe)) &
170 VLV_EDP_PSR_CURR_STATE_MASK;
171 return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
172 (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
173}
174
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300175static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
176 const struct intel_crtc_state *crtc_state)
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800177{
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300178 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
179 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800180 uint32_t val;
181
182 /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300183 val = I915_READ(VLV_VSCSDP(crtc->pipe));
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800184 val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
185 val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300186 I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800187}
188
Rodrigo Vivi2ce4df82017-09-07 16:00:35 -0700189static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
190 const struct intel_crtc_state *crtc_state)
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530191{
Nagaraju, Vathsala97da2ef2017-01-02 17:00:55 +0530192 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300193 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
194 struct edp_vsc_psr psr_vsc;
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530195
Rodrigo Vivi2ce4df82017-09-07 16:00:35 -0700196 if (dev_priv->psr.psr2_support) {
197 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
198 memset(&psr_vsc, 0, sizeof(psr_vsc));
199 psr_vsc.sdp_header.HB0 = 0;
200 psr_vsc.sdp_header.HB1 = 0x7;
201 if (dev_priv->psr.colorimetry_support &&
202 dev_priv->psr.y_cord_support) {
203 psr_vsc.sdp_header.HB2 = 0x5;
204 psr_vsc.sdp_header.HB3 = 0x13;
205 } else if (dev_priv->psr.y_cord_support) {
206 psr_vsc.sdp_header.HB2 = 0x4;
207 psr_vsc.sdp_header.HB3 = 0xe;
208 } else {
209 psr_vsc.sdp_header.HB2 = 0x3;
210 psr_vsc.sdp_header.HB3 = 0xc;
211 }
Nagaraju, Vathsala97da2ef2017-01-02 17:00:55 +0530212 } else {
Rodrigo Vivi2ce4df82017-09-07 16:00:35 -0700213 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
214 memset(&psr_vsc, 0, sizeof(psr_vsc));
215 psr_vsc.sdp_header.HB0 = 0;
216 psr_vsc.sdp_header.HB1 = 0x7;
217 psr_vsc.sdp_header.HB2 = 0x2;
218 psr_vsc.sdp_header.HB3 = 0x8;
Nagaraju, Vathsala97da2ef2017-01-02 17:00:55 +0530219 }
220
Ville Syrjälä1d776532017-10-13 22:40:51 +0300221 intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
222 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530223}
224
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800225static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
226{
227 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
Durgadoss R670b90d2015-03-27 17:21:32 +0530228 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800229}
230
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200231static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
232 enum port port)
Ville Syrjälä1f380892015-11-11 20:34:16 +0200233{
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +0000234 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä1f380892015-11-11 20:34:16 +0200235 return DP_AUX_CH_CTL(port);
236 else
237 return EDP_PSR_AUX_CTL;
238}
239
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200240static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
241 enum port port, int index)
Ville Syrjälä1f380892015-11-11 20:34:16 +0200242{
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +0000243 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä1f380892015-11-11 20:34:16 +0200244 return DP_AUX_CH_DATA(port, index);
245 else
246 return EDP_PSR_AUX_DATA(index);
247}
248
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800249static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800250{
251 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100253 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800254 uint32_t aux_clock_divider;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200255 i915_reg_t aux_ctl_reg;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800256 static const uint8_t aux_msg[] = {
257 [0] = DP_AUX_NATIVE_WRITE << 4,
258 [1] = DP_SET_POWER >> 8,
259 [2] = DP_SET_POWER & 0xff,
260 [3] = 1 - 1,
261 [4] = DP_SET_POWER_D0,
262 };
Ville Syrjälä8f4f2792017-11-09 17:24:34 +0200263 enum port port = dig_port->base.port;
Daniel Vetterd4dcbdc2016-05-18 18:47:15 +0200264 u32 aux_ctl;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800265 int i;
266
267 BUILD_BUG_ON(sizeof(aux_msg) > 20);
268
269 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
270
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530271 /* Enable AUX frame sync at sink */
272 if (dev_priv->psr.aux_frame_sync)
273 drm_dp_dpcd_writeb(&intel_dp->aux,
274 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
275 DP_AUX_FRAME_SYNC_ENABLE);
Nagaraju, Vathsala340c93c2017-01-02 17:00:58 +0530276 /* Enable ALPM at sink for psr2 */
277 if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
278 drm_dp_dpcd_writeb(&intel_dp->aux,
279 DP_RECEIVER_ALPM_CONFIG,
280 DP_ALPM_ENABLE);
Daniel Vetter6f32ea72016-05-18 18:47:14 +0200281 if (dev_priv->psr.link_standby)
282 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
283 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
284 else
285 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
286 DP_PSR_ENABLE);
287
Ville Syrjälä1f380892015-11-11 20:34:16 +0200288 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
Sonika Jindale3d99842015-01-22 14:30:54 +0530289
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800290 /* Setup AUX registers */
291 for (i = 0; i < sizeof(aux_msg); i += 4)
Ville Syrjälä1f380892015-11-11 20:34:16 +0200292 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800293 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
294
Daniel Vetterd4dcbdc2016-05-18 18:47:15 +0200295 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
296 aux_clock_divider);
297 I915_WRITE(aux_ctl_reg, aux_ctl);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800298}
299
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300300static void vlv_psr_enable_source(struct intel_dp *intel_dp,
301 const struct intel_crtc_state *crtc_state)
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800302{
303 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300304 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
305 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800306
Rodrigo Vivi0d0c2792017-09-12 11:30:59 -0700307 /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300308 I915_WRITE(VLV_PSRCTL(crtc->pipe),
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800309 VLV_EDP_PSR_MODE_SW_TIMER |
310 VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
311 VLV_EDP_PSR_ENABLE);
312}
313
Rodrigo Vivi995d3042014-11-19 07:37:47 -0800314static void vlv_psr_activate(struct intel_dp *intel_dp)
315{
316 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
317 struct drm_device *dev = dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100318 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivi995d3042014-11-19 07:37:47 -0800319 struct drm_crtc *crtc = dig_port->base.base.crtc;
320 enum pipe pipe = to_intel_crtc(crtc)->pipe;
321
Rodrigo Vivi0d0c2792017-09-12 11:30:59 -0700322 /*
323 * Let's do the transition from PSR_state 1 (inactive) to
324 * PSR_state 2 (transition to active - static frame transmission).
325 * Then Hardware is responsible for the transition to
326 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
Rodrigo Vivi995d3042014-11-19 07:37:47 -0800327 */
328 I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
329 VLV_EDP_PSR_ACTIVE_ENTRY);
330}
331
Rodrigo Vivied63d242017-09-07 16:00:33 -0700332static void hsw_activate_psr1(struct intel_dp *intel_dp)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800333{
334 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
335 struct drm_device *dev = dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100336 struct drm_i915_private *dev_priv = to_i915(dev);
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530337
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800338 uint32_t max_sleep_time = 0x1f;
Rodrigo Vivi40918e02016-09-07 17:42:31 -0700339 /*
340 * Let's respect VBT in case VBT asks a higher idle_frame value.
341 * Let's use 6 as the minimum to cover all known cases including
342 * the off-by-one issue that HW has in some cases. Also there are
343 * cases where sink should be able to train
344 * with the 5 or 6 idle patterns.
Rodrigo Vivid44b4dc2014-11-14 08:52:31 -0800345 */
Rodrigo Vivi40918e02016-09-07 17:42:31 -0700346 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
Daniel Vetter50db1392016-05-18 18:47:11 +0200347 uint32_t val = EDP_PSR_ENABLE;
348
349 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
350 val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
Rodrigo Vivi7370c682015-12-11 16:31:31 -0800351
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +0100352 if (IS_HASWELL(dev_priv))
Rodrigo Vivi7370c682015-12-11 16:31:31 -0800353 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800354
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -0800355 if (dev_priv->psr.link_standby)
356 val |= EDP_PSR_LINK_STANDBY;
357
Daniel Vetter50db1392016-05-18 18:47:11 +0200358 if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
359 val |= EDP_PSR_TP1_TIME_2500us;
360 else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
361 val |= EDP_PSR_TP1_TIME_500us;
362 else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
363 val |= EDP_PSR_TP1_TIME_100us;
364 else
365 val |= EDP_PSR_TP1_TIME_0us;
Sonika Jindal474d1ec2015-04-02 11:02:44 +0530366
Daniel Vetter50db1392016-05-18 18:47:11 +0200367 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
368 val |= EDP_PSR_TP2_TP3_TIME_2500us;
369 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
370 val |= EDP_PSR_TP2_TP3_TIME_500us;
371 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
372 val |= EDP_PSR_TP2_TP3_TIME_100us;
373 else
374 val |= EDP_PSR_TP2_TP3_TIME_0us;
375
376 if (intel_dp_source_supports_hbr2(intel_dp) &&
377 drm_dp_tps3_supported(intel_dp->dpcd))
378 val |= EDP_PSR_TP1_TP3_SEL;
379 else
380 val |= EDP_PSR_TP1_TP2_SEL;
381
Jim Bride912d6412017-08-08 14:51:34 -0700382 val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
Daniel Vetter50db1392016-05-18 18:47:11 +0200383 I915_WRITE(EDP_PSR_CTL, val);
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530384}
Daniel Vetter50db1392016-05-18 18:47:11 +0200385
Rodrigo Vivied63d242017-09-07 16:00:33 -0700386static void hsw_activate_psr2(struct intel_dp *intel_dp)
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530387{
388 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
389 struct drm_device *dev = dig_port->base.base.dev;
390 struct drm_i915_private *dev_priv = to_i915(dev);
391 /*
392 * Let's respect VBT in case VBT asks a higher idle_frame value.
393 * Let's use 6 as the minimum to cover all known cases including
394 * the off-by-one issue that HW has in some cases. Also there are
395 * cases where sink should be able to train
396 * with the 5 or 6 idle patterns.
397 */
398 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
399 uint32_t val;
vathsala nagaraju977da082017-09-26 15:29:13 +0530400 uint8_t sink_latency;
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530401
402 val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
Daniel Vetter50db1392016-05-18 18:47:11 +0200403
404 /* FIXME: selective update is probably totally broken because it doesn't
405 * mesh at all with our frontbuffer tracking. And the hw alone isn't
406 * good enough. */
Nagaraju, Vathsala64332262017-01-13 06:01:24 +0530407 val |= EDP_PSR2_ENABLE |
vathsala nagaraju977da082017-09-26 15:29:13 +0530408 EDP_SU_TRACK_ENABLE;
409
410 if (drm_dp_dpcd_readb(&intel_dp->aux,
411 DP_SYNCHRONIZATION_LATENCY_IN_SINK,
412 &sink_latency) == 1) {
413 sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
414 } else {
415 sink_latency = 0;
416 }
417 val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
Daniel Vetter50db1392016-05-18 18:47:11 +0200418
419 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
420 val |= EDP_PSR2_TP2_TIME_2500;
421 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
422 val |= EDP_PSR2_TP2_TIME_500;
423 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
424 val |= EDP_PSR2_TP2_TIME_100;
425 else
426 val |= EDP_PSR2_TP2_TIME_50;
427
428 I915_WRITE(EDP_PSR2_CTL, val);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800429}
430
Rodrigo Vivied63d242017-09-07 16:00:33 -0700431static void hsw_psr_activate(struct intel_dp *intel_dp)
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530432{
433 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
434 struct drm_device *dev = dig_port->base.base.dev;
435 struct drm_i915_private *dev_priv = to_i915(dev);
436
Rodrigo Vivied63d242017-09-07 16:00:33 -0700437 /* On HSW+ after we enable PSR on source it will activate it
438 * as soon as it match configure idle_frame count. So
439 * we just actually enable it here on activation time.
440 */
441
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530442 /* psr1 and psr2 are mutually exclusive.*/
443 if (dev_priv->psr.psr2_support)
Rodrigo Vivied63d242017-09-07 16:00:33 -0700444 hsw_activate_psr2(intel_dp);
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530445 else
Rodrigo Vivied63d242017-09-07 16:00:33 -0700446 hsw_activate_psr1(intel_dp);
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530447}
448
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300449void intel_psr_compute_config(struct intel_dp *intel_dp,
450 struct intel_crtc_state *crtc_state)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800451{
452 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300453 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300454 const struct drm_display_mode *adjusted_mode =
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300455 &crtc_state->base.adjusted_mode;
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300456 int psr_setup_time;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800457
Dhinakaran Pandiyan4371d892018-01-03 13:38:23 -0800458 if (!CAN_PSR(dev_priv))
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300459 return;
460
461 if (!i915_modparams.enable_psr) {
462 DRM_DEBUG_KMS("PSR disable by flag\n");
463 return;
464 }
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800465
Rodrigo Vividc9b5a02016-02-01 12:02:06 -0800466 /*
467 * HSW spec explicitly says PSR is tied to port A.
468 * BDW+ platforms with DDI implementation of PSR have different
469 * PSR registers per transcoder and we only implement transcoder EDP
470 * ones. Since by Display design transcoder EDP is tied to port A
471 * we can safely escape based on the port A.
472 */
Ville Syrjälä8f4f2792017-11-09 17:24:34 +0200473 if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
Rodrigo Vividc9b5a02016-02-01 12:02:06 -0800474 DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300475 return;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800476 }
477
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +0100478 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
Rodrigo Vivi65f61b42016-02-01 12:02:08 -0800479 !dev_priv->psr.link_standby) {
480 DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300481 return;
Rodrigo Vivi65f61b42016-02-01 12:02:08 -0800482 }
483
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +0100484 if (IS_HASWELL(dev_priv) &&
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300485 I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
Rodrigo Vivic8e68b72015-01-12 10:14:29 -0800486 S3D_ENABLE) {
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800487 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300488 return;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800489 }
490
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +0100491 if (IS_HASWELL(dev_priv) &&
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300492 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800493 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300494 return;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800495 }
496
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300497 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
498 if (psr_setup_time < 0) {
499 DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
500 intel_dp->psr_dpcd[1]);
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300501 return;
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300502 }
503
504 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
505 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
506 DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
507 psr_setup_time);
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300508 return;
509 }
510
511 /*
512 * FIXME psr2_support is messed up. It's both computed
513 * dynamically during PSR enable, and extracted from sink
514 * caps during eDP detection.
515 */
516 if (!dev_priv->psr.psr2_support) {
517 crtc_state->has_psr = true;
518 return;
Ville Syrjälädfd2e9a2016-05-18 11:34:38 +0300519 }
520
Nagaraju, Vathsalaacf45d12017-01-10 12:32:26 +0530521 /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300522 if (adjusted_mode->crtc_hdisplay > 3200 ||
523 adjusted_mode->crtc_vdisplay > 2000) {
524 DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
525 return;
Nagaraju, Vathsalaacf45d12017-01-10 12:32:26 +0530526 }
527
Nagaraju, Vathsala18b9bf32017-01-12 03:58:30 +0530528 /*
529 * FIXME:enable psr2 only for y-cordinate psr2 panels
530 * After gtc implementation , remove this restriction.
531 */
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300532 if (!dev_priv->psr.y_cord_support) {
Nagaraju, Vathsala18b9bf32017-01-12 03:58:30 +0530533 DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300534 return;
Nagaraju, Vathsala18b9bf32017-01-12 03:58:30 +0530535 }
536
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300537 crtc_state->has_psr = true;
538 crtc_state->has_psr2 = true;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800539}
540
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800541static void intel_psr_activate(struct intel_dp *intel_dp)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800542{
543 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
544 struct drm_device *dev = intel_dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100545 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800546
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530547 if (dev_priv->psr.psr2_support)
548 WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
549 else
550 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800551 WARN_ON(dev_priv->psr.active);
552 lockdep_assert_held(&dev_priv->psr.lock);
553
Rodrigo Vivie3702ac2017-09-07 16:00:34 -0700554 dev_priv->psr.activate(intel_dp);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800555 dev_priv->psr.active = true;
556}
557
Rodrigo Vivi4d1fa222017-09-07 16:00:36 -0700558static void hsw_psr_enable_source(struct intel_dp *intel_dp,
559 const struct intel_crtc_state *crtc_state)
560{
561 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
562 struct drm_device *dev = dig_port->base.base.dev;
563 struct drm_i915_private *dev_priv = to_i915(dev);
564 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
565 u32 chicken;
566
Dhinakaran Pandiyanb891d5e2018-02-23 14:15:15 -0800567 psr_aux_io_power_get(intel_dp);
568
Rodrigo Vivi4d1fa222017-09-07 16:00:36 -0700569 if (dev_priv->psr.psr2_support) {
570 chicken = PSR2_VSC_ENABLE_PROG_HEADER;
571 if (dev_priv->psr.y_cord_support)
572 chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
573 I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
574
Dhinakaran Pandiyan861023e2017-12-20 12:10:21 -0800575 I915_WRITE(EDP_PSR_DEBUG,
Rodrigo Vivi4d1fa222017-09-07 16:00:36 -0700576 EDP_PSR_DEBUG_MASK_MEMUP |
577 EDP_PSR_DEBUG_MASK_HPD |
578 EDP_PSR_DEBUG_MASK_LPSP |
579 EDP_PSR_DEBUG_MASK_MAX_SLEEP |
580 EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
581 } else {
582 /*
583 * Per Spec: Avoid continuous PSR exit by masking MEMUP
584 * and HPD. also mask LPSP to avoid dependency on other
585 * drivers that might block runtime_pm besides
586 * preventing other hw tracking issues now we can rely
587 * on frontbuffer tracking.
588 */
Dhinakaran Pandiyan861023e2017-12-20 12:10:21 -0800589 I915_WRITE(EDP_PSR_DEBUG,
Rodrigo Vivi4d1fa222017-09-07 16:00:36 -0700590 EDP_PSR_DEBUG_MASK_MEMUP |
591 EDP_PSR_DEBUG_MASK_HPD |
592 EDP_PSR_DEBUG_MASK_LPSP);
593 }
594}
595
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800596/**
597 * intel_psr_enable - Enable PSR
598 * @intel_dp: Intel DP
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300599 * @crtc_state: new CRTC state
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800600 *
601 * This function can only be called after the pipe is fully trained and enabled.
602 */
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300603void intel_psr_enable(struct intel_dp *intel_dp,
604 const struct intel_crtc_state *crtc_state)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800605{
606 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
607 struct drm_device *dev = intel_dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100608 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800609
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300610 if (!crtc_state->has_psr)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800611 return;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800612
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -0800613 if (WARN_ON(!CAN_PSR(dev_priv)))
614 return;
615
Radhakrishna Sripadada83ef82017-09-14 11:16:41 -0700616 WARN_ON(dev_priv->drrs.dp);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800617 mutex_lock(&dev_priv->psr.lock);
618 if (dev_priv->psr.enabled) {
619 DRM_DEBUG_KMS("PSR already in use\n");
620 goto unlock;
621 }
622
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300623 dev_priv->psr.psr2_support = crtc_state->has_psr2;
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800624 dev_priv->psr.busy_frontbuffer_bits = 0;
625
Rodrigo Vivi2a5db872017-09-07 16:00:39 -0700626 dev_priv->psr.setup_vsc(intel_dp, crtc_state);
Rodrigo Vivi49ad3162017-09-07 16:00:40 -0700627 dev_priv->psr.enable_sink(intel_dp);
Rodrigo Vivid0d5e0d2017-09-07 16:00:41 -0700628 dev_priv->psr.enable_source(intel_dp, crtc_state);
Rodrigo Vivi29d1efe2017-09-07 16:00:38 -0700629 dev_priv->psr.enabled = intel_dp;
630
631 if (INTEL_GEN(dev_priv) >= 9) {
632 intel_psr_activate(intel_dp);
633 } else {
634 /*
635 * FIXME: Activation should happen immediately since this
636 * function is just called after pipe is fully trained and
637 * enabled.
638 * However on some platforms we face issues when first
639 * activation follows a modeset so quickly.
640 * - On VLV/CHV we get bank screen on first activation
641 * - On HSW/BDW we get a recoverable frozen screen until
642 * next exit-activate sequence.
643 */
Rodrigo Vivid0ac8962015-11-11 11:37:07 -0800644 schedule_delayed_work(&dev_priv->psr.work,
645 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
Rodrigo Vivi29d1efe2017-09-07 16:00:38 -0700646 }
Rodrigo Vivid0ac8962015-11-11 11:37:07 -0800647
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800648unlock:
649 mutex_unlock(&dev_priv->psr.lock);
650}
651
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300652static void vlv_psr_disable(struct intel_dp *intel_dp,
653 const struct intel_crtc_state *old_crtc_state)
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800654{
655 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
656 struct drm_device *dev = intel_dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100657 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300658 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800659 uint32_t val;
660
661 if (dev_priv->psr.active) {
Rodrigo Vivi0d0c2792017-09-12 11:30:59 -0700662 /* Put VLV PSR back to PSR_state 0 (disabled). */
Chris Wilsoneb0241c2016-06-30 15:33:26 +0100663 if (intel_wait_for_register(dev_priv,
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300664 VLV_PSRSTAT(crtc->pipe),
Chris Wilsoneb0241c2016-06-30 15:33:26 +0100665 VLV_EDP_PSR_IN_TRANS,
666 0,
667 1))
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800668 WARN(1, "PSR transition took longer than expected\n");
669
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300670 val = I915_READ(VLV_PSRCTL(crtc->pipe));
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800671 val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
672 val &= ~VLV_EDP_PSR_ENABLE;
673 val &= ~VLV_EDP_PSR_MODE_MASK;
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300674 I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800675
676 dev_priv->psr.active = false;
677 } else {
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300678 WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
Rodrigo Vivie2bbc342014-11-19 07:37:00 -0800679 }
680}
681
/*
 * hsw_psr_disable - stop PSR on the source side for HSW+ platforms.
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * Disables aux frame sync on the sink (if used), clears the PSR1 or PSR2
 * enable bit, waits for the hardware to report idle, and finally drops the
 * AUX IO power reference taken at enable time.
 */
static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		/* Turn off sink-side aux frame sync before disabling PSR. */
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					0);

		/*
		 * PSR2 and PSR1 use different control/status registers;
		 * pick the pair matching the mode that was enabled.
		 */
		if (dev_priv->psr.psr2_support) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
					~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		/* Not active: the enable bit must already be clear. */
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	/* Release the AUX IO power domain taken when PSR was enabled. */
	psr_aux_io_power_put(intel_dp);
}
730
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800731/**
732 * intel_psr_disable - Disable PSR
733 * @intel_dp: Intel DP
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300734 * @old_crtc_state: old CRTC state
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800735 *
736 * This function needs to be called before disabling pipe.
737 */
Ville Syrjäläd2419ff2017-08-18 16:49:56 +0300738void intel_psr_disable(struct intel_dp *intel_dp,
739 const struct intel_crtc_state *old_crtc_state)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800740{
741 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
742 struct drm_device *dev = intel_dig_port->base.base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +0100743 struct drm_i915_private *dev_priv = to_i915(dev);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800744
Ville Syrjälä4d90f2d2017-10-12 16:02:01 +0300745 if (!old_crtc_state->has_psr)
Rodrigo Vivi0f328da2017-09-07 16:00:31 -0700746 return;
747
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -0800748 if (WARN_ON(!CAN_PSR(dev_priv)))
749 return;
750
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800751 mutex_lock(&dev_priv->psr.lock);
752 if (!dev_priv->psr.enabled) {
753 mutex_unlock(&dev_priv->psr.lock);
754 return;
755 }
756
Rodrigo Vivi424644c2017-09-07 16:00:32 -0700757 dev_priv->psr.disable_source(intel_dp, old_crtc_state);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800758
Rodrigo Vivib6e4d532015-11-23 14:19:32 -0800759 /* Disable PSR on Sink */
760 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
761
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800762 dev_priv->psr.enabled = NULL;
763 mutex_unlock(&dev_priv->psr.lock);
764
765 cancel_delayed_work_sync(&dev_priv->psr.work);
766}
767
/*
 * intel_psr_work - delayed work that re-activates PSR after a flush.
 *
 * Waits (outside the PSR lock) until the hardware reports PSR fully idle,
 * then re-checks the software state under the lock and re-activates PSR
 * unless it was disabled or new frontbuffer activity arrived in between.
 */
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	/* Re-read under the lock: PSR may have been disabled meanwhile. */
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
829
/*
 * intel_psr_exit - force the hardware out of PSR active state.
 * @dev_priv: i915 device
 *
 * Clears the PSR enable/active bits on the source; on VLV/CHV additionally
 * wakes the sink via DPCD so the transition back to inactive completes.
 * Caller must hold dev_priv->psr.lock.
 */
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		/* Stop sink-side aux frame sync before dropping the enable bit. */
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR State 4 (active with single frame update) can be skipped.
		 * On PSR_state 5 (exit) Hardware is responsible to transition
		 * back to PSR_state 1 (inactive).
		 * Now we are at Same state after vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}
883
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800884/**
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700885 * intel_psr_single_frame_update - Single Frame Update
Chris Wilson5748b6a2016-08-04 16:32:38 +0100886 * @dev_priv: i915 device
Daniel Vetter20c88382015-06-18 10:30:27 +0200887 * @frontbuffer_bits: frontbuffer plane tracking bits
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700888 *
889 * Some platforms support a single frame update feature that is used to
890 * send and update only one frame on Remote Frame Buffer.
891 * So far it is only implemented for Valleyview and Cherryview because
892 * hardware requires this to be done before a page flip.
893 */
Chris Wilson5748b6a2016-08-04 16:32:38 +0100894void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
Daniel Vetter20c88382015-06-18 10:30:27 +0200895 unsigned frontbuffer_bits)
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700896{
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700897 struct drm_crtc *crtc;
898 enum pipe pipe;
899 u32 val;
900
Dhinakaran Pandiyan4371d892018-01-03 13:38:23 -0800901 if (!CAN_PSR(dev_priv))
Rodrigo Vivi0f328da2017-09-07 16:00:31 -0700902 return;
903
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700904 /*
905 * Single frame update is already supported on BDW+ but it requires
906 * many W/A and it isn't really needed.
907 */
Chris Wilson5748b6a2016-08-04 16:32:38 +0100908 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700909 return;
910
911 mutex_lock(&dev_priv->psr.lock);
912 if (!dev_priv->psr.enabled) {
913 mutex_unlock(&dev_priv->psr.lock);
914 return;
915 }
916
917 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
918 pipe = to_intel_crtc(crtc)->pipe;
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700919
Daniel Vetter20c88382015-06-18 10:30:27 +0200920 if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
921 val = I915_READ(VLV_PSRCTL(pipe));
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700922
Daniel Vetter20c88382015-06-18 10:30:27 +0200923 /*
924 * We need to set this bit before writing registers for a flip.
925 * This bit will be self-clear when it gets to the PSR active state.
926 */
927 I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
928 }
Rodrigo Vivic7240c32015-04-10 11:15:10 -0700929 mutex_unlock(&dev_priv->psr.lock);
930}
931
932/**
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800933 * intel_psr_invalidate - Invalidade PSR
Chris Wilson5748b6a2016-08-04 16:32:38 +0100934 * @dev_priv: i915 device
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800935 * @frontbuffer_bits: frontbuffer plane tracking bits
936 *
937 * Since the hardware frontbuffer tracking has gaps we need to integrate
938 * with the software frontbuffer tracking. This function gets called every
939 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
940 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
941 *
942 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
943 */
Chris Wilson5748b6a2016-08-04 16:32:38 +0100944void intel_psr_invalidate(struct drm_i915_private *dev_priv,
Daniel Vetter20c88382015-06-18 10:30:27 +0200945 unsigned frontbuffer_bits)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800946{
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800947 struct drm_crtc *crtc;
948 enum pipe pipe;
949
Dhinakaran Pandiyan4371d892018-01-03 13:38:23 -0800950 if (!CAN_PSR(dev_priv))
Rodrigo Vivi0f328da2017-09-07 16:00:31 -0700951 return;
952
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800953 mutex_lock(&dev_priv->psr.lock);
954 if (!dev_priv->psr.enabled) {
955 mutex_unlock(&dev_priv->psr.lock);
956 return;
957 }
958
959 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
960 pipe = to_intel_crtc(crtc)->pipe;
961
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800962 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800963 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
Daniel Vetterec76d622015-06-18 10:30:26 +0200964
965 if (frontbuffer_bits)
Chris Wilson5748b6a2016-08-04 16:32:38 +0100966 intel_psr_exit(dev_priv);
Daniel Vetterec76d622015-06-18 10:30:26 +0200967
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800968 mutex_unlock(&dev_priv->psr.lock);
969}
970
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800971/**
972 * intel_psr_flush - Flush PSR
Chris Wilson5748b6a2016-08-04 16:32:38 +0100973 * @dev_priv: i915 device
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800974 * @frontbuffer_bits: frontbuffer plane tracking bits
Rodrigo Vivi169de132015-07-08 16:21:31 -0700975 * @origin: which operation caused the flush
Rodrigo Vivib2b89f52014-11-14 08:52:29 -0800976 *
977 * Since the hardware frontbuffer tracking has gaps we need to integrate
978 * with the software frontbuffer tracking. This function gets called every
979 * time frontbuffer rendering has completed and flushed out to memory. PSR
980 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
981 *
982 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
983 */
Chris Wilson5748b6a2016-08-04 16:32:38 +0100984void intel_psr_flush(struct drm_i915_private *dev_priv,
Rodrigo Vivi169de132015-07-08 16:21:31 -0700985 unsigned frontbuffer_bits, enum fb_op_origin origin)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800986{
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800987 struct drm_crtc *crtc;
988 enum pipe pipe;
989
Dhinakaran Pandiyan4371d892018-01-03 13:38:23 -0800990 if (!CAN_PSR(dev_priv))
Rodrigo Vivi0f328da2017-09-07 16:00:31 -0700991 return;
992
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -0800993 mutex_lock(&dev_priv->psr.lock);
994 if (!dev_priv->psr.enabled) {
995 mutex_unlock(&dev_priv->psr.lock);
996 return;
997 }
998
999 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1000 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterec76d622015-06-18 10:30:26 +02001001
1002 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001003 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1004
Rodrigo Vivi921ec282015-11-18 11:21:12 -08001005 /* By definition flush = invalidate + flush */
1006 if (frontbuffer_bits)
Chris Wilson5748b6a2016-08-04 16:32:38 +01001007 intel_psr_exit(dev_priv);
Rodrigo Vivi995d3042014-11-19 07:37:47 -08001008
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001009 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
Rodrigo Vivid0ac8962015-11-11 11:37:07 -08001010 if (!work_busy(&dev_priv->psr.work.work))
1011 schedule_delayed_work(&dev_priv->psr.work,
Rodrigo Vivi20bb97f2015-11-11 11:37:08 -08001012 msecs_to_jiffies(100));
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001013 mutex_unlock(&dev_priv->psr.lock);
1014}
1015
Rodrigo Vivib2b89f52014-11-14 08:52:29 -08001016/**
1017 * intel_psr_init - Init basic PSR work and mutex.
Ander Conselvan de Oliveira93de0562016-11-29 13:48:47 +02001018 * @dev_priv: i915 device private
Rodrigo Vivib2b89f52014-11-14 08:52:29 -08001019 *
1020 * This function is called only once at driver load to initialize basic
1021 * PSR stuff.
1022 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02001023void intel_psr_init(struct drm_i915_private *dev_priv)
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001024{
Rodrigo Vivi0f328da2017-09-07 16:00:31 -07001025 if (!HAS_PSR(dev_priv))
1026 return;
1027
Ville Syrjälä443a3892015-11-11 20:34:15 +02001028 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
1029 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
1030
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08001031 if (!dev_priv->psr.sink_support)
1032 return;
1033
Paulo Zanoni2ee7dc42016-12-13 18:57:44 -02001034 /* Per platform default: all disabled. */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001035 if (i915_modparams.enable_psr == -1)
1036 i915_modparams.enable_psr = 0;
Rodrigo Vivid94d6e82016-02-12 04:08:11 -08001037
Rodrigo Vivi65f61b42016-02-01 12:02:08 -08001038 /* Set link_standby x link_off defaults */
Tvrtko Ursulin86527442016-10-13 11:03:00 +01001039 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08001040 /* HSW and BDW require workarounds that we don't implement. */
1041 dev_priv->psr.link_standby = false;
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01001042 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08001043 /* On VLV and CHV only standby mode is supported. */
1044 dev_priv->psr.link_standby = true;
1045 else
1046 /* For new platforms let's respect VBT back again */
1047 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
1048
Rodrigo Vivi65f61b42016-02-01 12:02:08 -08001049 /* Override link_standby x link_off defaults */
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001050 if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
Rodrigo Vivi65f61b42016-02-01 12:02:08 -08001051 DRM_DEBUG_KMS("PSR: Forcing link standby\n");
1052 dev_priv->psr.link_standby = true;
1053 }
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001054 if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
Rodrigo Vivi65f61b42016-02-01 12:02:08 -08001055 DRM_DEBUG_KMS("PSR: Forcing main link off\n");
1056 dev_priv->psr.link_standby = false;
1057 }
1058
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001059 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
1060 mutex_init(&dev_priv->psr.lock);
Rodrigo Vivi424644c2017-09-07 16:00:32 -07001061
1062 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Rodrigo Vivid0d5e0d2017-09-07 16:00:41 -07001063 dev_priv->psr.enable_source = vlv_psr_enable_source;
Rodrigo Vivi424644c2017-09-07 16:00:32 -07001064 dev_priv->psr.disable_source = vlv_psr_disable;
Rodrigo Vivi49ad3162017-09-07 16:00:40 -07001065 dev_priv->psr.enable_sink = vlv_psr_enable_sink;
Rodrigo Vivie3702ac2017-09-07 16:00:34 -07001066 dev_priv->psr.activate = vlv_psr_activate;
Rodrigo Vivi2a5db872017-09-07 16:00:39 -07001067 dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
Rodrigo Vivi424644c2017-09-07 16:00:32 -07001068 } else {
Rodrigo Vivid0d5e0d2017-09-07 16:00:41 -07001069 dev_priv->psr.enable_source = hsw_psr_enable_source;
Rodrigo Vivi424644c2017-09-07 16:00:32 -07001070 dev_priv->psr.disable_source = hsw_psr_disable;
Rodrigo Vivi49ad3162017-09-07 16:00:40 -07001071 dev_priv->psr.enable_sink = hsw_psr_enable_sink;
Rodrigo Vivie3702ac2017-09-07 16:00:34 -07001072 dev_priv->psr.activate = hsw_psr_activate;
Rodrigo Vivi2a5db872017-09-07 16:00:39 -07001073 dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
Rodrigo Vivi424644c2017-09-07 16:00:32 -07001074 }
Rodrigo Vivi0bc12bc2014-11-14 08:52:28 -08001075}