/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * completely eliminates display refresh requests to DDR memory as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
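
/*
 * A minimal sketch of the integration contract described above; the actual
 * call sites live in the frontbuffer tracking code (intel_frontbuffer.c):
 *
 *	intel_psr_invalidate(dev, frontbuffer_bits);
 *	... frontbuffer rendering ...
 *	intel_psr_flush(dev, frontbuffer_bits, origin);
 */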

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

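/* The sink advertises PSR support in its DPCD (DP_PSR_SUPPORT, address 070h). */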
static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

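/*
 * On VLV/CHV the PSR state machine is tracked per pipe; PSR counts as
 * active both in the "active, no RFB update" and the "active, single
 * frame update" hardware states.
 */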
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

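/*
 * Write a VSC SDP (the packet that signals PSR state to the sink) into the
 * HSW+ video DIP data registers of the transcoder, padding the remaining
 * DIP space with zeroes.
 */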
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t val;

	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
	val = I915_READ(VLV_VSCSDP(pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(pipe), val);
}

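/*
 * For PSR2/selective update the VSC SDP header selects the revision 3
 * packet layout (HB2 = 0x3) with 11 valid payload bytes (HB3 = 0xb);
 * this reading of the header bytes follows the eDP 1.4 table referenced
 * below.
 */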
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC Header for SU as per the eDP 1.4 spec, Table 6.11 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x3;
	psr_vsc.sdp_header.HB3 = 0xb;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per the eDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}

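/*
 * Unlike HSW+, VLV/CHV keep the main link active while in PSR, so the
 * sink is told to enable PSR with DP_PSR_MAIN_LINK_ACTIVE set.
 */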
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

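/*
 * Gen9+ reuses the standard AUX channel registers for the PSR AUX
 * transaction; HSW/BDW have dedicated PSR AUX registers instead.
 */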
static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
	int precharge = 0x3;
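	/*
	 * AUX message the hardware sends on PSR exit: a native AUX write
	 * of DP_SET_POWER_D0 to DPCD 600h, waking the sink back up.
	 */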
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->port;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				   DP_AUX_FRAME_SYNC_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	if (INTEL_INFO(dev)->gen >= 9) {
		uint32_t val;

		val = I915_READ(aux_ctl_reg);
		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
		/* Use hardcoded data values for PSR, frame sync and GTC */
		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
		I915_WRITE(aux_ctl_reg, val);
	} else {
		I915_WRITE(aux_ctl_reg,
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}

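/*
 * VLV/CHV PSR_state numbering used in the comments below: 0 = disabled,
 * 1 = inactive, 2 = transition to active (static frame transmission),
 * 3 = active (no RFB update), 4 = active with single frame update,
 * 5 = exit.
 */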
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2, i.e. the
	 * PSR transition to active - static frame transmission. Hardware is
	 * then responsible for the transition to PSR_state 3, i.e. PSR
	 * active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

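/*
 * Program the HSW+ source PSR control register; once enabled the hardware
 * enters PSR on its own after the configured number of idle frames.
 */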
static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	uint32_t max_sleep_time = 0x1f;
	/* It was recently identified that, depending on the panel, the idle
	 * frame count calculated by the hardware can be off by 1. So let's
	 * use what came from VBT + 1.
	 * There are also cases where the panel demands at least 4 idle
	 * frames but VBT is not set. To cover both cases let's use at
	 * least 5 when VBT isn't set, to be on the safe side.
	 */
	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
			       dev_priv->vbt.psr.idle_frames + 1 : 5;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		/* It doesn't mean we shouldn't send TPS patterns, so let's
		   send the minimal TP1 possible and skip TP2. */
		val |= EDP_PSR_TP1_TIME_100us;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
		/* Sink should be able to train with the 5 or 6 idle patterns */
		idle_frames += 4;
	}

	I915_WRITE(EDP_PSR_CTL, val |
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);

	if (dev_priv->psr.psr2_support)
		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
			   EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}

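/*
 * Check the source-side constraints that gate PSR; the verdict is also
 * cached in dev_priv->psr.source_ok (e.g. for debugfs reporting).
 */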
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
				    (dig_port->port != PORT_A))) {
		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	if (HAS_DDI(dev))
		/* On HSW+ the hardware enters PSR on its own as soon as the
		 * configured idle_frame count is reached, so enabling the
		 * source here at activation time is all that's needed.
		 */
		hsw_psr_enable_source(intel_dp);
	else
		vlv_psr_activate(intel_dp);

	dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	if (HAS_DDI(dev)) {
		hsw_psr_setup_vsc(intel_dp);

		if (dev_priv->psr.psr2_support) {
			/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
			if (crtc->config->pipe_src_w > 3200 ||
			    crtc->config->pipe_src_h > 2000)
				dev_priv->psr.psr2_support = false;
			else
				skl_psr_setup_su_vsc(intel_dp);
		}

		/* Avoid continuous PSR exit by masking memup and hpd */
		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD);

		/* Enable PSR on the panel */
		hsw_psr_enable_sink(intel_dp);

		if (INTEL_INFO(dev)->gen >= 9)
			intel_psr_activate(intel_dp);
	} else {
		vlv_psr_setup_vsc(intel_dp);

		/* Enable PSR on the panel */
		vlv_psr_enable_sink(intel_dp);

		/* On HSW+ enable_source also means going to the PSR
		 * entry/active state as soon as the idle_frame count is
		 * reached, which here would be too soon. However on VLV
		 * enable_source just enables PSR and leaves it in the
		 * inactive state. So we can do this prior to the active
		 * transition, i.e. here.
		 */
		vlv_psr_enable_source(intel_dp);
	}

	/*
	 * FIXME: Activation should happen immediately since this function
	 * is just called after the pipe is fully trained and enabled.
	 * However on every platform we face issues when first activation
	 * follows a modeset so quickly.
	 * - On VLV/CHV we get a blank screen on first activation
	 * - On HSW/BDW we get a recoverable frozen screen until the next
	 *   exit-activate sequence.
	 */
	if (INTEL_INFO(dev)->gen < 9)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
		if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL,
			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	if (HAS_DDI(dev))
		hsw_psr_disable(intel_dp);
	else
		vlv_psr_disable(intel_dp);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

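/*
 * Delayed work that re-activates PSR after an exit, once the hardware
 * reports the previous PSR state has fully settled; scheduled from
 * intel_psr_enable() and intel_psr_flush().
 */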
static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv->dev)) {
		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	} else {
		if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

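/*
 * Disarm PSR on the source side so the next frame reaches the screen;
 * re-activation is deferred to the work queue above.
 */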
static void intel_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev)) {
		val = I915_READ(EDP_PSR_CTL);

		WARN_ON(!(val & EDP_PSR_ENABLE));

		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/* Here we do the transition from PSR_state 3 to PSR_state 5
		 * directly, since PSR_state 4 (active with single frame
		 * update) can be skipped. From PSR_state 5 (PSR exit) the
		 * hardware is responsible for transitioning back to
		 * PSR_state 1 (PSR inactive), the same state as after
		 * vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/* Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_device *dev,
				   unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit is self-clearing once we get to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	if (HAS_DDI(dev)) {
		/*
		 * By definition every flush should mean invalidate + flush,
		 * however on core platforms let's minimize the
		 * disable/re-enable so we can avoid the invalidate when the
		 * flush was originated by a flip.
		 */
		if (frontbuffer_bits && origin != ORIGIN_FLIP)
			intel_psr_exit(dev);
	} else {
		/*
		 * On Valleyview and Cherryview we don't use hardware tracking,
		 * so plane updates or cursor moves don't result in a PSR
		 * invalidation. Which means we need to manually fake this in
		 * software for all flushes.
		 */
		if (frontbuffer_bits)
			intel_psr_exit(dev);
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}