/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR specification in eDP 1.3. The PSR feature allows the display to go to
 * lower standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
	 * However, for non-A AUX ports the corresponding non-EDP transcoders
	 * would have already enabled power well 2 and DC_OFF. This means we can
	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
	 * specific AUX_IO reference without powering up any extra wells.
	 * Note that PSR is enabled only on Port A even though this function
	 * returns the correct domain for other ports too.
	 */
	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
					      intel_dp->aux_power_domain;
}

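/*
 * Only CNL+ (gen10+) needs to hold an extra AUX IO power reference while PSR
 * is enabled, hence the early return for older platforms in the helpers below.
 */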
static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				      &frame_sync_cap) != 1)
			frame_sync_cap = 0;
		dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");

		if (dev_priv->psr.psr2_support) {
			dev_priv->psr.y_cord_support =
				intel_dp_get_y_cord_status(intel_dp);
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
		}
	}
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

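	/*
	 * HB2 carries the VSC SDP revision and HB3 the number of valid data
	 * bytes: 0x5/0x13 when both colorimetry and the Y coordinate are
	 * sent, 0x4/0xe for the Y coordinate only, 0x3/0xc for plain PSR2
	 * and 0x2/0x8 for PSR1.
	 */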
	if (dev_priv->psr.psr2_support) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support &&
		    dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else if (dev_priv->psr.y_cord_support) {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		} else {
			psr_vsc.sdp_header.HB2 = 0x3;
			psr_vsc.sdp_header.HB3 = 0xc;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

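/*
 * HSW/BDW have dedicated EDP_PSR_AUX_* registers for the PSR exit AUX
 * transaction, while gen9+ reuses the regular AUX channel registers.
 */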
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
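	/*
	 * Pre-packed native AUX write of DP_SET_POWER = D0 (a single data
	 * byte, hence the length field of 1 - 1). This is the message the
	 * hardware can send on PSR exit to wake the sink.
	 */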
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->base.port;
	u32 aux_ctl;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);
	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
	if (dev_priv->psr.link_standby)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
	else
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   DP_PSR_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
	I915_WRITE(aux_ctl_reg, aux_ctl);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then Hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

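	/*
	 * Translate the VBT-provided TP1 and TP2/TP3 wakeup times into the
	 * discrete wake durations the hardware supports.
	 */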
	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val;
	uint8_t sink_latency;

	val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE |
	       EDP_SU_TRACK_ENABLE;

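	/*
	 * Ask the sink how many frames it needs to resynchronize, and program
	 * one frame more than that before each selective update.
	 */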
	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK,
			      &sink_latency) == 1) {
		sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	} else {
		sink_latency = 0;
	}
	val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}

Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530432{
433 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
434 struct drm_device *dev = dig_port->base.base.dev;
435 struct drm_i915_private *dev_priv = to_i915(dev);
436
Rodrigo Vivied63d242017-09-07 16:00:33 -0700437 /* On HSW+ after we enable PSR on source it will activate it
438 * as soon as it match configure idle_frame count. So
439 * we just actually enable it here on activation time.
440 */
441
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530442 /* psr1 and psr2 are mutually exclusive.*/
443 if (dev_priv->psr.psr2_support)
Rodrigo Vivied63d242017-09-07 16:00:33 -0700444 hsw_activate_psr2(intel_dp);
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530445 else
Rodrigo Vivied63d242017-09-07 16:00:33 -0700446 hsw_activate_psr1(intel_dp);
Nagaraju, Vathsala3fcb0ca2017-01-12 23:30:59 +0530447}
448
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * FIXME: enable PSR2 only for panels that support the Y coordinate.
	 * Remove this restriction once the GTC implementation lands.
	 */
	if (!dev_priv->psr.y_cord_support) {
		DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

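	/*
	 * The sink must be able to complete its PSR entry setup within the
	 * vertical blank, so reject modes whose vblank is too short for it.
	 */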
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_support)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 chicken;

	psr_aux_io_power_get(intel_dp);

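	/*
	 * For PSR2, have the hardware use the VSC SDP header programmed in
	 * hsw_psr_setup_vsc() and insert the Y coordinate (vertical line
	 * count) into the selective update packets when the panel needs it.
	 */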
	if (dev_priv->psr.psr2_support) {
		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
		if (dev_priv->psr.y_cord_support)
			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per spec: avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid a dependency on other
		 * drivers that might block runtime_pm, besides preventing
		 * other hw tracking issues, now that we can rely on
		 * frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_support = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after pipe is fully trained and
		 * enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 * - On VLV/CHV we get a blank screen on first activation
		 * - On HSW/BDW we get a recoverable frozen screen until
		 *   next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);

		if (dev_priv->psr.psr2_support) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_support)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_support) {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
		}
	} else {
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.aux_frame_sync)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
					   0);
		if (dev_priv->psr.psr2_support) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR State 4 (active with single frame update) can be skipped.
		 * On PSR_state 5 (exit) Hardware is responsible to transition
		 * back to PSR_state 1 (inactive).
		 * This leaves us in the same state as after vlv_psr_enable_source().
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!CAN_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit self-clears once PSR reaches the active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

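	/*
	 * If PSR was dropped by the flush above (or by an earlier invalidate)
	 * and nothing is pending anymore, schedule re-activation from the
	 * work queue.
	 */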
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	/* Per platform default: all disabled. */
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby x link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

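	/*
	 * Hook up the platform specific PSR vfuncs: VLV/CHV have their own
	 * implementation, everything else takes the HSW+ DDI paths.
	 */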
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}