/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
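
/*
 * For illustration only (the formula and reference clock below are assumptions
 * for this worked example, not taken from this file): with the classic i9xx
 * DPLL relation m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2),
 * dot = vco / (p1 * p2) and an assumed 96 MHz reference clock, the first gen4
 * entry gives m = 5 * 25 + 10 = 135, vco = 96000 * 135 / 4 = 3240000 kHz and
 * dot = 3240000 / 20 = 162000 kHz, i.e. the 1.62 GHz DP link rate in kHz.
 */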

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
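
/*
 * Worked example of the fixed-point encoding above (illustrative, not part of
 * the original table): for the 162000 entry, m2_int = 32 and
 * m2_fraction = 1677722, so (32 << 22) | 1677722 = 0x8000000 | 0x19999a
 * = 0x819999a, which matches the .m2 value programmed above.
 */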

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
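
/*
 * For illustration (not in the original source): with lane_count = 2,
 * intel_dp_unused_lane_mask() returns ~0x3 & 0xf = 0xc, i.e. a mask with
 * bits set for the two unused lanes (lanes 2 and 3) out of the four
 * possible DP lanes.
 */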

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
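
/*
 * Worked example (illustrative only, values not from the original source):
 * validating a 148500 kHz mode (1920x1080@60) at the 18bpp value used by
 * intel_dp_mode_valid() below, over two lanes of 2.7 GHz, gives
 *
 *     mode_rate = intel_dp_link_required(148500, 18) = (148500 * 18 + 9) / 10 = 267300
 *     max_rate  = intel_dp_max_data_rate(270000, 2)  = 270000 * 2 * 8 / 10   = 432000
 *
 * Both values are in the decakilobit units described above; since
 * mode_rate <= max_rate, this mode would pass the bandwidth check.
 */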

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
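
/*
 * Illustrative example (values chosen for the example, not from the original
 * source): packing the four bytes { 0x09, 0x00, 0x00, 0x0e } places the first
 * byte in the most significant position, giving 0x0900000e;
 * intel_dp_unpack_aux() performs the inverse, splitting a 32-bit AUX data
 * register back into bytes in the same big-endian order.
 */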

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
308 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   This function is only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}
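
/*
 * Illustrative numbers (assumed for the example, not taken from the original
 * source): if intel_hrawclk() reports 200 (MHz), the divider above becomes
 * DIV_ROUND_CLOSEST(200, 2) = 100, and the ~2 MHz AUX bit-clock target in the
 * comment is met as 200 MHz / 100 = 2 MHz.
 */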

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (the hardware
	 * derives the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There EBUSY should
		 * also take care of the 1ms wait before retrying.
		 * That aux retry re-org is still needed, and once it is
		 * merged we can remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
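
/*
 * Illustrative example (values chosen for the example, not from the original
 * source): a native AUX read (request 0x9) of 15 bytes from DPCD address
 * 0x00000 is packed by the code above as
 *
 *     txbuf[0] = (0x9 << 4) | 0x0 = 0x90
 *     txbuf[1] = 0x00, txbuf[2] = 0x00   (address bits 15:0)
 *     txbuf[3] = 15 - 1 = 0x0e           (length minus one, per the AUX protocol)
 *
 * so the 4-byte HEADER_SIZE header is sent, and rxsize is 15 + 1 to leave
 * room for the reply/command byte in rxbuf[0].
 */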
1023
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001024static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1025 enum port port)
Ville Syrjäläda00bdc2015-11-11 20:34:13 +02001026{
1027 switch (port) {
1028 case PORT_B:
1029 case PORT_C:
1030 case PORT_D:
1031 return DP_AUX_CH_CTL(port);
1032 default:
1033 MISSING_CASE(port);
1034 return DP_AUX_CH_CTL(PORT_B);
1035 }
1036}
1037
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001038static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1039 enum port port, int index)
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001040{
1041 switch (port) {
1042 case PORT_B:
1043 case PORT_C:
1044 case PORT_D:
1045 return DP_AUX_CH_DATA(port, index);
1046 default:
1047 MISSING_CASE(port);
1048 return DP_AUX_CH_DATA(PORT_B, index);
1049 }
1050}
1051
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001052static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1053 enum port port)
Ville Syrjäläda00bdc2015-11-11 20:34:13 +02001054{
1055 switch (port) {
1056 case PORT_A:
1057 return DP_AUX_CH_CTL(port);
1058 case PORT_B:
1059 case PORT_C:
1060 case PORT_D:
1061 return PCH_DP_AUX_CH_CTL(port);
1062 default:
1063 MISSING_CASE(port);
1064 return DP_AUX_CH_CTL(PORT_A);
1065 }
1066}
1067
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001068static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1069 enum port port, int index)
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001070{
1071 switch (port) {
1072 case PORT_A:
1073 return DP_AUX_CH_DATA(port, index);
1074 case PORT_B:
1075 case PORT_C:
1076 case PORT_D:
1077 return PCH_DP_AUX_CH_DATA(port, index);
1078 default:
1079 MISSING_CASE(port);
1080 return DP_AUX_CH_DATA(PORT_A, index);
1081 }
1082}
1083
Ville Syrjäläda00bdc2015-11-11 20:34:13 +02001084/*
1085 * On SKL we don't have Aux for port E so we rely
1086 * on VBT to set a proper alternate aux channel.
1087 */
1088static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1089{
1090 const struct ddi_vbt_port_info *info =
1091 &dev_priv->vbt.ddi_port_info[PORT_E];
1092
1093 switch (info->alternate_aux_channel) {
1094 case DP_AUX_A:
1095 return PORT_A;
1096 case DP_AUX_B:
1097 return PORT_B;
1098 case DP_AUX_C:
1099 return PORT_C;
1100 case DP_AUX_D:
1101 return PORT_D;
1102 default:
1103 MISSING_CASE(info->alternate_aux_channel);
1104 return PORT_A;
1105 }
1106}
1107
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001108static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1109 enum port port)
Ville Syrjäläda00bdc2015-11-11 20:34:13 +02001110{
1111 if (port == PORT_E)
1112 port = skl_porte_aux_port(dev_priv);
1113
1114 switch (port) {
1115 case PORT_A:
1116 case PORT_B:
1117 case PORT_C:
1118 case PORT_D:
1119 return DP_AUX_CH_CTL(port);
1120 default:
1121 MISSING_CASE(port);
1122 return DP_AUX_CH_CTL(PORT_A);
1123 }
1124}
1125
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001126static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1127 enum port port, int index)
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001128{
1129 if (port == PORT_E)
1130 port = skl_porte_aux_port(dev_priv);
1131
1132 switch (port) {
1133 case PORT_A:
1134 case PORT_B:
1135 case PORT_C:
1136 case PORT_D:
1137 return DP_AUX_CH_DATA(port, index);
1138 default:
1139 MISSING_CASE(port);
1140 return DP_AUX_CH_DATA(PORT_A, index);
1141 }
1142}
1143
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001144static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1145 enum port port)
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001146{
1147 if (INTEL_INFO(dev_priv)->gen >= 9)
1148 return skl_aux_ctl_reg(dev_priv, port);
1149 else if (HAS_PCH_SPLIT(dev_priv))
1150 return ilk_aux_ctl_reg(dev_priv, port);
1151 else
1152 return g4x_aux_ctl_reg(dev_priv, port);
1153}
1154
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001155static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1156 enum port port, int index)
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001157{
1158 if (INTEL_INFO(dev_priv)->gen >= 9)
1159 return skl_aux_data_reg(dev_priv, port, index);
1160 else if (HAS_PCH_SPLIT(dev_priv))
1161 return ilk_aux_data_reg(dev_priv, port, index);
1162 else
1163 return g4x_aux_data_reg(dev_priv, port, index);
1164}
1165
1166static void intel_aux_reg_init(struct intel_dp *intel_dp)
1167{
1168 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1169 enum port port = dp_to_dig_port(intel_dp)->port;
1170 int i;
1171
1172 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1173 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1174 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1175}
1176
Jani Nikula9d1a1032014-03-14 16:51:15 +02001177static void
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001178intel_dp_aux_fini(struct intel_dp *intel_dp)
1179{
1180 drm_dp_aux_unregister(&intel_dp->aux);
1181 kfree(intel_dp->aux.name);
1182}
1183
1184static int
Jani Nikula9d1a1032014-03-14 16:51:15 +02001185intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001186{
Jani Nikula9d1a1032014-03-14 16:51:15 +02001187 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jani Nikula33ad6622014-03-14 16:51:16 +02001188 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1189 enum port port = intel_dig_port->port;
Dave Airlieab2c0672009-12-04 10:55:24 +10001190 int ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001191
Ville Syrjälä330e20e2015-11-11 20:34:14 +02001192 intel_aux_reg_init(intel_dp);
David Flynn8316f332010-12-08 16:10:21 +00001193
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001194 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1195 if (!intel_dp->aux.name)
1196 return -ENOMEM;
1197
Jani Nikula9d1a1032014-03-14 16:51:15 +02001198 intel_dp->aux.dev = dev->dev;
1199 intel_dp->aux.transfer = intel_dp_aux_transfer;
David Flynn8316f332010-12-08 16:10:21 +00001200
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001201 DRM_DEBUG_KMS("registering %s bus for %s\n",
1202 intel_dp->aux.name,
Jani Nikula0b998362014-03-14 16:51:17 +02001203 connector->base.kdev->kobj.name);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001204
Dave Airlie4f71d0c2014-06-04 16:02:28 +10001205 ret = drm_dp_aux_register(&intel_dp->aux);
Jani Nikula0b998362014-03-14 16:51:17 +02001206 if (ret < 0) {
Dave Airlie4f71d0c2014-06-04 16:02:28 +10001207 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001208 intel_dp->aux.name, ret);
1209 kfree(intel_dp->aux.name);
1210 return ret;
Dave Airlieab2c0672009-12-04 10:55:24 +10001211 }
David Flynn8316f332010-12-08 16:10:21 +00001212
Jani Nikula0b998362014-03-14 16:51:17 +02001213 ret = sysfs_create_link(&connector->base.kdev->kobj,
1214 &intel_dp->aux.ddc.dev.kobj,
1215 intel_dp->aux.ddc.dev.kobj.name);
1216 if (ret < 0) {
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001217 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1218 intel_dp->aux.name, ret);
1219 intel_dp_aux_fini(intel_dp);
1220 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001221 }
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02001222
1223 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001224}
1225
Imre Deak80f65de2014-02-11 17:12:49 +02001226static void
1227intel_dp_connector_unregister(struct intel_connector *intel_connector)
1228{
1229 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1230
Dave Airlie0e32b392014-05-02 14:02:48 +10001231 if (!intel_connector->mst_port)
1232 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1233 intel_dp->aux.ddc.dev.kobj.name);
Imre Deak80f65de2014-02-11 17:12:49 +02001234 intel_connector_unregister(intel_connector);
1235}
1236
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001237static void
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001238skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
Damien Lespiau5416d872014-11-14 17:24:33 +00001239{
1240 u32 ctrl1;
1241
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03001242 memset(&pipe_config->dpll_hw_state, 0,
1243 sizeof(pipe_config->dpll_hw_state));
1244
Damien Lespiau5416d872014-11-14 17:24:33 +00001245 pipe_config->ddi_pll_sel = SKL_DPLL0;
1246 pipe_config->dpll_hw_state.cfgcr1 = 0;
1247 pipe_config->dpll_hw_state.cfgcr2 = 0;
1248
1249 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001250 switch (pipe_config->port_clock / 2) {
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301251 case 81000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001252 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
Damien Lespiau5416d872014-11-14 17:24:33 +00001253 SKL_DPLL0);
1254 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301255 case 135000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001256 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
Damien Lespiau5416d872014-11-14 17:24:33 +00001257 SKL_DPLL0);
1258 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301259 case 270000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
Damien Lespiau5416d872014-11-14 17:24:33 +00001261 SKL_DPLL0);
1262 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301263 case 162000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001264 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301265 SKL_DPLL0);
1266 break;
 1267 /* TBD: for DP link rates 2.16 GHz and 4.32 GHz the VCO is 8640 MHz,
 1268 * which results in a CDCLK change. Need to handle the CDCLK change by
 1269 * disabling pipes and re-enabling them. */
1270 case 108000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001271 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301272 SKL_DPLL0);
1273 break;
1274 case 216000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001275 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301276 SKL_DPLL0);
1277 break;
1278
Damien Lespiau5416d872014-11-14 17:24:33 +00001279 }
1280 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1281}
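
/*
 * Illustrative sketch, not part of the original file: the switch above keys
 * on half the DP port clock, so e.g. HBR2 (port_clock 540000 kHz) selects
 * DPLL_CTRL1_LINK_RATE_2700 and RBR (162000 kHz) selects
 * DPLL_CTRL1_LINK_RATE_810. The same mapping could be written table-driven;
 * skl_dpll0_rate_map[] and skl_dpll0_ctrl1() are hypothetical names used
 * only for illustration.
 */
static const struct {
	int half_clock;
	int link_rate;
} skl_dpll0_rate_map[] = {
	{  81000, DPLL_CTRL1_LINK_RATE_810 },
	{ 108000, DPLL_CTRL1_LINK_RATE_1080 },
	{ 135000, DPLL_CTRL1_LINK_RATE_1350 },
	{ 162000, DPLL_CTRL1_LINK_RATE_1620 },
	{ 216000, DPLL_CTRL1_LINK_RATE_2160 },
	{ 270000, DPLL_CTRL1_LINK_RATE_2700 },
};

static u32 skl_dpll0_ctrl1(int port_clock)
{
	u32 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	int i;

	for (i = 0; i < ARRAY_SIZE(skl_dpll0_rate_map); i++)
		if (skl_dpll0_rate_map[i].half_clock == port_clock / 2)
			ctrl1 |= DPLL_CTRL1_LINK_RATE(skl_dpll0_rate_map[i].link_rate,
						      SKL_DPLL0);

	return ctrl1;
}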
1282
Ander Conselvan de Oliveira6fa2d192015-08-31 11:23:28 +03001283void
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001284hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
Daniel Vetter0e503382014-07-04 11:26:04 -03001285{
Ander Conselvan de Oliveiraee46f3c72015-06-30 16:10:38 +03001286 memset(&pipe_config->dpll_hw_state, 0,
1287 sizeof(pipe_config->dpll_hw_state));
1288
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001289 switch (pipe_config->port_clock / 2) {
1290 case 81000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001291 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1292 break;
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001293 case 135000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001294 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1295 break;
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001296 case 270000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001297 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1298 break;
1299 }
1300}
1301
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301302static int
Ville Syrjälä12f6a2e2015-03-12 17:10:30 +02001303intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301304{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001305 if (intel_dp->num_sink_rates) {
1306 *sink_rates = intel_dp->sink_rates;
1307 return intel_dp->num_sink_rates;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301308 }
Ville Syrjälä12f6a2e2015-03-12 17:10:30 +02001309
1310 *sink_rates = default_rates;
1311
1312 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301313}
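
/*
 * Illustrative note, not part of the original file: the fallback above leans
 * on the legacy DPCD bandwidth codes being DP_LINK_BW_1_62 (0x06),
 * DP_LINK_BW_2_7 (0x0a) and DP_LINK_BW_5_4 (0x14). Shifting right by 3 maps
 * them to 0, 1 and 2, so "+ 1" is the number of leading default_rates[]
 * entries the sink can use; e.g. a sink reporting DP_LINK_BW_2_7 gets the
 * first two entries, 162000 and 270000 kHz.
 */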
1314
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001315bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301316{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001317 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1318 struct drm_device *dev = dig_port->base.base.dev;
1319
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301320 /* WaDisableHBR2:skl */
Jani Nikulae87a0052015-10-20 15:22:02 +03001321 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301322 return false;
1323
1324 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1325 (INTEL_INFO(dev)->gen >= 9))
1326 return true;
1327 else
1328 return false;
1329}
1330
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301331static int
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001332intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301333{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001334 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1335 struct drm_device *dev = dig_port->base.base.dev;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301336 int size;
1337
Sonika Jindal64987fc2015-05-26 17:50:13 +05301338 if (IS_BROXTON(dev)) {
1339 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301340 size = ARRAY_SIZE(bxt_rates);
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001341 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301342 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301343 size = ARRAY_SIZE(skl_rates);
1344 } else {
1345 *source_rates = default_rates;
1346 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301347 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001348
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301349 /* This depends on HBR2 (5.4 GHz) being the last value in the array */
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001350 if (!intel_dp_source_supports_hbr2(intel_dp))
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301351 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001352
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301353 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301354}
1355
Daniel Vetter0e503382014-07-04 11:26:04 -03001356static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001357intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001358 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001359{
1360 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001361 const struct dp_link_dpll *divisor = NULL;
1362 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001363
1364 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001365 divisor = gen4_dpll;
1366 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001367 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001368 divisor = pch_dpll;
1369 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001370 } else if (IS_CHERRYVIEW(dev)) {
1371 divisor = chv_dpll;
1372 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001373 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001374 divisor = vlv_dpll;
1375 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001376 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001377
1378 if (divisor && count) {
1379 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001380 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001381 pipe_config->dpll = divisor[i].dpll;
1382 pipe_config->clock_set = true;
1383 break;
1384 }
1385 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001386 }
1387}
1388
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001389static int intersect_rates(const int *source_rates, int source_len,
1390 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001391 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301392{
1393 int i = 0, j = 0, k = 0;
1394
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301395 while (i < source_len && j < sink_len) {
1396 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001397 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1398 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001399 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301400 ++k;
1401 ++i;
1402 ++j;
1403 } else if (source_rates[i] < sink_rates[j]) {
1404 ++i;
1405 } else {
1406 ++j;
1407 }
1408 }
1409 return k;
1410}
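
/*
 * Illustrative sketch, not part of the original file: intersect_rates() is a
 * standard two-pointer merge over two ascending arrays. With source rates
 * { 162000, 270000, 540000 } and sink rates { 162000, 216000, 270000 } it
 * stores { 162000, 270000 } and returns 2. A minimal standalone version of
 * the same idea, using hypothetical names:
 */
static int intersect_sorted(const int *a, int a_len,
			    const int *b, int b_len,
			    int *out, int out_size)
{
	int i = 0, j = 0, k = 0;

	while (i < a_len && j < b_len && k < out_size) {
		if (a[i] == b[j]) {
			/* common element, advance both sides */
			out[k++] = a[i];
			i++;
			j++;
		} else if (a[i] < b[j]) {
			i++;
		} else {
			j++;
		}
	}

	return k;
}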
1411
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001412static int intel_dp_common_rates(struct intel_dp *intel_dp,
1413 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001414{
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001415 const int *source_rates, *sink_rates;
1416 int source_len, sink_len;
1417
1418 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001419 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001420
1421 return intersect_rates(source_rates, source_len,
1422 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001423 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001424}
1425
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001426static void snprintf_int_array(char *str, size_t len,
1427 const int *array, int nelem)
1428{
1429 int i;
1430
1431 str[0] = '\0';
1432
1433 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001434 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001435 if (r >= len)
1436 return;
1437 str += r;
1438 len -= r;
1439 }
1440}
1441
1442static void intel_dp_print_rates(struct intel_dp *intel_dp)
1443{
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001444 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001445 int source_len, sink_len, common_len;
1446 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001447 char str[128]; /* FIXME: too big for stack? */
1448
1449 if ((drm_debug & DRM_UT_KMS) == 0)
1450 return;
1451
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001452 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001453 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1454 DRM_DEBUG_KMS("source rates: %s\n", str);
1455
1456 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1457 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1458 DRM_DEBUG_KMS("sink rates: %s\n", str);
1459
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001460 common_len = intel_dp_common_rates(intel_dp, common_rates);
1461 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1462 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001463}
1464
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001465static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301466{
1467 int i = 0;
1468
1469 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1470 if (find == rates[i])
1471 break;
1472
1473 return i;
1474}
1475
Ville Syrjälä50fec212015-03-12 17:10:34 +02001476int
1477intel_dp_max_link_rate(struct intel_dp *intel_dp)
1478{
1479 int rates[DP_MAX_SUPPORTED_RATES] = {};
1480 int len;
1481
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001482 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001483 if (WARN_ON(len <= 0))
1484 return 162000;
1485
1486 return rates[rate_to_index(0, rates) - 1];
1487}
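
/*
 * Illustrative note, not part of the original file: rates[] above is
 * zero-initialized and intel_dp_common_rates() fills only the first "len"
 * entries in ascending order, so rate_to_index(0, rates) finds the first
 * unused (zero) slot, i.e. "len" itself, and the lookup is equivalent to
 * rates[len - 1]. For rates = { 162000, 270000, 540000, 0, ... } the
 * function therefore returns 540000, the highest common rate.
 */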
1488
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001489int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1490{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001491 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001492}
1493
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03001494void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1495 uint8_t *link_bw, uint8_t *rate_select)
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001496{
1497 if (intel_dp->num_sink_rates) {
1498 *link_bw = 0;
1499 *rate_select =
1500 intel_dp_rate_select(intel_dp, port_clock);
1501 } else {
1502 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1503 *rate_select = 0;
1504 }
1505}
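
/*
 * Illustrative example, not part of the original file: for a sink without a
 * DPCD link rate table (num_sink_rates == 0) a port_clock of 270000 kHz is
 * reported as link_bw = DP_LINK_BW_2_7 (0x0a) with rate_select = 0, while a
 * sink whose sink_rates[] is, say, { 162000, 270000, 540000 } gets
 * link_bw = 0 and rate_select = 1, i.e. an index into the rate table it
 * advertised via DP_SUPPORTED_LINK_RATES.
 */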
1506
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001507bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001508intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001509 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001510{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001511 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001512 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001513 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001514 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001515 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001516 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001517 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001518 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001519 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001520 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001521 /* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001522 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301523 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001524 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001525 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001526 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1527 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001528 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301529
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001530 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301531
1532 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001533 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301534
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001535 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001536
Imre Deakbc7d38a2013-05-16 14:40:36 +03001537 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001538 pipe_config->has_pch_encoder = true;
1539
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001540 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001541 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001542 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001543
Jani Nikuladd06f902012-10-19 14:51:50 +03001544 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1545 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1546 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001547
1548 if (INTEL_INFO(dev)->gen >= 9) {
1549 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001550 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001551 if (ret)
1552 return ret;
1553 }
1554
Matt Roperb56676272015-11-04 09:05:27 -08001555 if (HAS_GMCH_DISPLAY(dev))
Jesse Barnes2dd24552013-04-25 12:55:01 -07001556 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1557 intel_connector->panel.fitting_mode);
1558 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001559 intel_pch_panel_fitting(intel_crtc, pipe_config,
1560 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001561 }
1562
Daniel Vettercb1793c2012-06-04 18:39:21 +02001563 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001564 return false;
1565
Daniel Vetter083f9562012-04-20 20:23:49 +02001566 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301567 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001568 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001569 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001570
Daniel Vetter36008362013-03-27 00:44:59 +01001571 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1572 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001573 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001574 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301575
1576 /* Get bpp from vbt only for panels that dont have bpp in edid */
1577 if (intel_connector->base.display_info.bpc == 0 &&
1578 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001579 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1580 dev_priv->vbt.edp_bpp);
1581 bpp = dev_priv->vbt.edp_bpp;
1582 }
1583
Jani Nikula344c5bb2014-09-09 11:25:13 +03001584 /*
1585 * Use the maximum clock and number of lanes the eDP panel
 1586 * advertises being capable of. The panels are generally
1587 * designed to support only a single clock and lane
1588 * configuration, and typically these values correspond to the
1589 * native resolution of the panel.
1590 */
1591 min_lane_count = max_lane_count;
1592 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001593 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001594
Daniel Vetter36008362013-03-27 00:44:59 +01001595 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001596 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1597 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001598
Dave Airliec6930992014-07-14 11:04:39 +10001599 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301600 for (lane_count = min_lane_count;
1601 lane_count <= max_lane_count;
1602 lane_count <<= 1) {
1603
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001604 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001605 link_avail = intel_dp_max_data_rate(link_clock,
1606 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001607
Daniel Vetter36008362013-03-27 00:44:59 +01001608 if (mode_rate <= link_avail) {
1609 goto found;
1610 }
1611 }
1612 }
1613 }
1614
1615 return false;
1616
1617found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001618 if (intel_dp->color_range_auto) {
1619 /*
1620 * See:
1621 * CEA-861-E - 5.1 Default Encoding Parameters
1622 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1623 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001624 pipe_config->limited_color_range =
1625 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1626 } else {
1627 pipe_config->limited_color_range =
1628 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001629 }
1630
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001631 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301632
Daniel Vetter657445f2013-05-04 10:09:18 +02001633 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001634 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001635
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001636 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1637 &link_bw, &rate_select);
1638
1639 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1640 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001641 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001642 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1643 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001644
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001645 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001646 adjusted_mode->crtc_clock,
1647 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001648 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001649
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301650 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301651 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001652 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301653 intel_link_compute_m_n(bpp, lane_count,
1654 intel_connector->panel.downclock_mode->clock,
1655 pipe_config->port_clock,
1656 &pipe_config->dp_m2_n2);
1657 }
1658
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001659 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001660 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301661 else if (IS_BROXTON(dev))
1662 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001663 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001664 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001665 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001666 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001667
Daniel Vetter36008362013-03-27 00:44:59 +01001668 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001669}
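
/*
 * Worked example, not part of the original file and assuming the usual DP
 * accounting in intel_dp_link_required()/intel_dp_max_data_rate() (payload
 * roughly pixel_clock * bpp / 8, link capacity roughly rate * lanes * 8/10
 * for 8b/10b coding): a 1920x1080@60 mode with a ~148500 kHz pixel clock at
 * 24 bpp needs about 148500 * 24 / 8 = 445500, while RBR (162000 kHz) on
 * 4 lanes offers about 162000 * 4 * 8 / 10 = 518400. Since the loops above
 * try the lowest clock first and then the fewest lanes, such a mode ends up
 * at RBR x4 on a non-eDP link rather than at a higher link rate.
 */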
1670
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001671void intel_dp_set_link_params(struct intel_dp *intel_dp,
1672 const struct intel_crtc_state *pipe_config)
1673{
1674 intel_dp->link_rate = pipe_config->port_clock;
1675 intel_dp->lane_count = pipe_config->lane_count;
1676}
1677
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001678static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001679{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001680 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001681 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001682 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001683 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001684 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03001685 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001686
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001687 intel_dp_set_link_params(intel_dp, crtc->config);
1688
Keith Packard417e8222011-11-01 19:54:11 -07001689 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001690 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001691 *
1692 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001693 * SNB CPU
1694 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001695 * CPT PCH
1696 *
1697 * IBX PCH and CPU are the same for almost everything,
1698 * except that the CPU DP PLL is configured in this
1699 * register
1700 *
1701 * CPT PCH is quite different, having many bits moved
1702 * to the TRANS_DP_CTL register instead. That
1703 * configuration happens (oddly) in ironlake_pch_enable
1704 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001705
Keith Packard417e8222011-11-01 19:54:11 -07001706 /* Preserve the BIOS-computed detected bit. This is
1707 * supposed to be read-only.
1708 */
1709 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001710
Keith Packard417e8222011-11-01 19:54:11 -07001711 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001712 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001713 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001714
Keith Packard417e8222011-11-01 19:54:11 -07001715 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001716
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001717 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001718 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1719 intel_dp->DP |= DP_SYNC_HS_HIGH;
1720 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1721 intel_dp->DP |= DP_SYNC_VS_HIGH;
1722 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1723
Jani Nikula6aba5b62013-10-04 15:08:10 +03001724 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001725 intel_dp->DP |= DP_ENHANCED_FRAMING;
1726
Daniel Vetter7c62a162013-06-01 17:16:20 +02001727 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001728 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001729 u32 trans_dp;
1730
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001731 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001732
1733 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1734 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1735 trans_dp |= TRANS_DP_ENH_FRAMING;
1736 else
1737 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1738 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001739 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001740 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
Wayne Boyer666a4532015-12-09 12:29:35 -08001741 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001742 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001743
1744 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1745 intel_dp->DP |= DP_SYNC_HS_HIGH;
1746 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1747 intel_dp->DP |= DP_SYNC_VS_HIGH;
1748 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1749
Jani Nikula6aba5b62013-10-04 15:08:10 +03001750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001751 intel_dp->DP |= DP_ENHANCED_FRAMING;
1752
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001753 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001754 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001755 else if (crtc->pipe == PIPE_B)
1756 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001757 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001758}
1759
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001760#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1761#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001762
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001763#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1764#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001765
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001766#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1767#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001768
Daniel Vetter4be73782014-01-17 14:39:48 +01001769static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001770 u32 mask,
1771 u32 value)
1772{
Paulo Zanoni30add222012-10-26 19:05:45 -02001773 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001774 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001775 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jesse Barnes453c5422013-03-28 09:55:41 -07001776
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001777 lockdep_assert_held(&dev_priv->pps_mutex);
1778
Jani Nikulabf13e812013-09-06 07:40:05 +03001779 pp_stat_reg = _pp_stat_reg(intel_dp);
1780 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001781
1782 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001783 mask, value,
1784 I915_READ(pp_stat_reg),
1785 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001786
Jesse Barnes453c5422013-03-28 09:55:41 -07001787 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001788 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001789 I915_READ(pp_stat_reg),
1790 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001791 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001792
1793 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001794}
1795
Daniel Vetter4be73782014-01-17 14:39:48 +01001796static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001797{
1798 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001799 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001800}
1801
Daniel Vetter4be73782014-01-17 14:39:48 +01001802static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001803{
Keith Packardbd943152011-09-18 23:09:52 -07001804 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001805 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001806}
Keith Packardbd943152011-09-18 23:09:52 -07001807
Daniel Vetter4be73782014-01-17 14:39:48 +01001808static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001809{
1810 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001811
1812 /* When we disable the VDD override bit last we have to do the manual
1813 * wait. */
1814 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1815 intel_dp->panel_power_cycle_delay);
1816
Daniel Vetter4be73782014-01-17 14:39:48 +01001817 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001818}
Keith Packardbd943152011-09-18 23:09:52 -07001819
Daniel Vetter4be73782014-01-17 14:39:48 +01001820static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001821{
1822 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1823 intel_dp->backlight_on_delay);
1824}
1825
Daniel Vetter4be73782014-01-17 14:39:48 +01001826static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001827{
1828 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1829 intel_dp->backlight_off_delay);
1830}
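
/*
 * Illustrative sketch, not part of the original file: the wait helpers above
 * only sleep for whatever portion of the required delay has not already
 * elapsed since the recorded timestamp, so back-to-back panel operations do
 * not pay the full delay twice. A simplified standalone version of that
 * idea, with a hypothetical name:
 */
static void example_wait_remaining_ms(unsigned long last_jiffies, int delay_ms)
{
	unsigned long target = last_jiffies + msecs_to_jiffies(delay_ms);

	if (time_before(jiffies, target))
		msleep(jiffies_to_msecs(target - jiffies));
}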
Keith Packard99ea7122011-11-01 19:57:50 -07001831
Keith Packard832dd3c2011-11-01 19:34:06 -07001832/* Read the current pp_control value, unlocking the register if it
1833 * is locked
1834 */
1835
Jesse Barnes453c5422013-03-28 09:55:41 -07001836static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001837{
Jesse Barnes453c5422013-03-28 09:55:41 -07001838 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1839 struct drm_i915_private *dev_priv = dev->dev_private;
1840 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001841
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001842 lockdep_assert_held(&dev_priv->pps_mutex);
1843
Jani Nikulabf13e812013-09-06 07:40:05 +03001844 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301845 if (!IS_BROXTON(dev)) {
1846 control &= ~PANEL_UNLOCK_MASK;
1847 control |= PANEL_UNLOCK_REGS;
1848 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001849 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001850}
1851
Ville Syrjälä951468f2014-09-04 14:55:31 +03001852/*
1853 * Must be paired with edp_panel_vdd_off().
1854 * Must hold pps_mutex around the whole on/off sequence.
1855 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1856 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001857static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001858{
Paulo Zanoni30add222012-10-26 19:05:45 -02001859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001860 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1861 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001862 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001863 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001864 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001865 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001866 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001867
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001868 lockdep_assert_held(&dev_priv->pps_mutex);
1869
Keith Packard97af61f572011-09-28 16:23:51 -07001870 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001871 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001872
Egbert Eich2c623c12014-11-25 12:54:57 +01001873 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001874 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001875
Daniel Vetter4be73782014-01-17 14:39:48 +01001876 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001877 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001878
Ville Syrjälä25f78f52015-11-16 15:01:04 +01001879 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001880 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001881
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001882 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1883 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001884
Daniel Vetter4be73782014-01-17 14:39:48 +01001885 if (!edp_have_panel_power(intel_dp))
1886 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001887
Jesse Barnes453c5422013-03-28 09:55:41 -07001888 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001889 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001890
Jani Nikulabf13e812013-09-06 07:40:05 +03001891 pp_stat_reg = _pp_stat_reg(intel_dp);
1892 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001893
1894 I915_WRITE(pp_ctrl_reg, pp);
1895 POSTING_READ(pp_ctrl_reg);
1896 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1897 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001898 /*
1899 * If the panel wasn't on, delay before accessing aux channel
1900 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001901 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001902 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1903 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001904 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001905 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001906
1907 return need_to_disable;
1908}
1909
Ville Syrjälä951468f2014-09-04 14:55:31 +03001910/*
1911 * Must be paired with intel_edp_panel_vdd_off() or
1912 * intel_edp_panel_off().
1913 * Nested calls to these functions are not allowed since
1914 * we drop the lock. Caller must use some higher level
1915 * locking to prevent nested calls from other threads.
1916 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001917void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001918{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001919 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001920
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001921 if (!is_edp(intel_dp))
1922 return;
1923
Ville Syrjälä773538e82014-09-04 14:54:56 +03001924 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001925 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001926 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001927
Rob Clarke2c719b2014-12-15 13:56:32 -05001928 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001929 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001930}
1931
Daniel Vetter4be73782014-01-17 14:39:48 +01001932static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001933{
Paulo Zanoni30add222012-10-26 19:05:45 -02001934 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001935 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001936 struct intel_digital_port *intel_dig_port =
1937 dp_to_dig_port(intel_dp);
1938 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1939 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001940 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001941 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001942
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001943 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001944
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001945 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001946
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001947 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001948 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001949
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001950 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1951 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001952
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001953 pp = ironlake_get_pp_control(intel_dp);
1954 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001955
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1957 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001958
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001959 I915_WRITE(pp_ctrl_reg, pp);
1960 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001961
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001962 /* Make sure sequencer is idle before allowing subsequent activity */
1963 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1964 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001965
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001966 if ((pp & POWER_TARGET_ON) == 0)
1967 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001968
Ville Syrjälä25f78f52015-11-16 15:01:04 +01001969 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001970 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001971}
1972
Daniel Vetter4be73782014-01-17 14:39:48 +01001973static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001974{
1975 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1976 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001977
Ville Syrjälä773538e82014-09-04 14:54:56 +03001978 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001979 if (!intel_dp->want_panel_vdd)
1980 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001981 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001982}
1983
Imre Deakaba86892014-07-30 15:57:31 +03001984static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1985{
1986 unsigned long delay;
1987
1988 /*
1989 * Queue the timer to fire a long time from now (relative to the power
1990 * down delay) to keep the panel power up across a sequence of
1991 * operations.
1992 */
1993 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1994 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1995}
1996
Ville Syrjälä951468f2014-09-04 14:55:31 +03001997/*
1998 * Must be paired with edp_panel_vdd_on().
1999 * Must hold pps_mutex around the whole on/off sequence.
2000 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2001 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002002static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07002003{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002004 struct drm_i915_private *dev_priv =
2005 intel_dp_to_dev(intel_dp)->dev_private;
2006
2007 lockdep_assert_held(&dev_priv->pps_mutex);
2008
Keith Packard97af61f572011-09-28 16:23:51 -07002009 if (!is_edp(intel_dp))
2010 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08002011
Rob Clarke2c719b2014-12-15 13:56:32 -05002012 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002013 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07002014
Keith Packardbd943152011-09-18 23:09:52 -07002015 intel_dp->want_panel_vdd = false;
2016
Imre Deakaba86892014-07-30 15:57:31 +03002017 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01002018 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03002019 else
2020 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08002021}
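
/*
 * Illustrative usage sketch, not part of the original file: the pairing and
 * locking rules in the comments above translate into roughly this shape for
 * a low-level caller that needs VDD around a single AUX access; the AUX
 * access itself is elided and example_vdd_protected_access() is a
 * hypothetical name.
 */
static void example_vdd_protected_access(struct intel_dp *intel_dp)
{
	bool vdd;

	pps_lock(intel_dp);

	vdd = edp_panel_vdd_on(intel_dp);

	/* ... perform the AUX transaction while VDD is guaranteed up ... */

	/* only drop VDD if this call was the one that requested it */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);
}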
2022
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002023static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07002024{
Paulo Zanoni30add222012-10-26 19:05:45 -02002025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002026 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07002027 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002028 i915_reg_t pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002029
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002030 lockdep_assert_held(&dev_priv->pps_mutex);
2031
Keith Packard97af61f572011-09-28 16:23:51 -07002032 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07002033 return;
Keith Packard99ea7122011-11-01 19:57:50 -07002034
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002035 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2036 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07002037
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03002038 if (WARN(edp_have_panel_power(intel_dp),
2039 "eDP port %c panel power already on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002041 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07002042
Daniel Vetter4be73782014-01-17 14:39:48 +01002043 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002044
Jani Nikulabf13e812013-09-06 07:40:05 +03002045 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002046 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07002047 if (IS_GEN5(dev)) {
2048 /* ILK workaround: disable reset around power sequence */
2049 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03002050 I915_WRITE(pp_ctrl_reg, pp);
2051 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07002052 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002053
Keith Packard1c0ae802011-09-19 13:59:29 -07002054 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07002055 if (!IS_GEN5(dev))
2056 pp |= PANEL_POWER_RESET;
2057
Jesse Barnes453c5422013-03-28 09:55:41 -07002058 I915_WRITE(pp_ctrl_reg, pp);
2059 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002060
Daniel Vetter4be73782014-01-17 14:39:48 +01002061 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02002062 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07002063
Keith Packard05ce1a42011-09-29 16:33:01 -07002064 if (IS_GEN5(dev)) {
2065 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03002066 I915_WRITE(pp_ctrl_reg, pp);
2067 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07002068 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002069}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002070
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002071void intel_edp_panel_on(struct intel_dp *intel_dp)
2072{
2073 if (!is_edp(intel_dp))
2074 return;
2075
2076 pps_lock(intel_dp);
2077 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002078 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002079}
2080
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002081
2082static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07002083{
Imre Deak4e6e1a52014-03-27 17:45:11 +02002084 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2085 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02002086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002087 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02002088 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002089 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002090 i915_reg_t pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002091
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002092 lockdep_assert_held(&dev_priv->pps_mutex);
2093
Keith Packard97af61f572011-09-28 16:23:51 -07002094 if (!is_edp(intel_dp))
2095 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002096
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002097 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2098 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002099
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002100 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2101 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002102
Jesse Barnes453c5422013-03-28 09:55:41 -07002103 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002104 /* We need to switch off panel power _and_ force vdd, for otherwise some
2105 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002106 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2107 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002108
Jani Nikulabf13e812013-09-06 07:40:05 +03002109 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002110
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002111 intel_dp->want_panel_vdd = false;
2112
Jesse Barnes453c5422013-03-28 09:55:41 -07002113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002115
Paulo Zanonidce56b32013-12-19 14:29:40 -02002116 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002117 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002118
2119 /* We got a reference when we enabled the VDD. */
Ville Syrjälä25f78f52015-11-16 15:01:04 +01002120 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak4e6e1a52014-03-27 17:45:11 +02002121 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002122}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002123
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002124void intel_edp_panel_off(struct intel_dp *intel_dp)
2125{
2126 if (!is_edp(intel_dp))
2127 return;
2128
2129 pps_lock(intel_dp);
2130 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002131 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002132}
2133
Jani Nikula1250d102014-08-12 17:11:39 +03002134/* Enable backlight in the panel power control. */
2135static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002136{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002137 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2138 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002139 struct drm_i915_private *dev_priv = dev->dev_private;
2140 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002141 i915_reg_t pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002142
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002143 /*
2144 * If we enable the backlight right away following a panel power
2145 * on, we may see slight flicker as the panel syncs with the eDP
2146 * link. So delay a bit to make sure the image is solid before
2147 * allowing it to appear.
2148 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002149 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002150
Ville Syrjälä773538e82014-09-04 14:54:56 +03002151 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002152
Jesse Barnes453c5422013-03-28 09:55:41 -07002153 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002154 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002155
Jani Nikulabf13e812013-09-06 07:40:05 +03002156 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002157
2158 I915_WRITE(pp_ctrl_reg, pp);
2159 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002160
Ville Syrjälä773538e82014-09-04 14:54:56 +03002161 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002162}
2163
Jani Nikula1250d102014-08-12 17:11:39 +03002164/* Enable backlight PWM and backlight PP control. */
2165void intel_edp_backlight_on(struct intel_dp *intel_dp)
2166{
2167 if (!is_edp(intel_dp))
2168 return;
2169
2170 DRM_DEBUG_KMS("\n");
2171
2172 intel_panel_enable_backlight(intel_dp->attached_connector);
2173 _intel_edp_backlight_on(intel_dp);
2174}
2175
2176/* Disable backlight in the panel power control. */
2177static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002178{
Paulo Zanoni30add222012-10-26 19:05:45 -02002179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002182 i915_reg_t pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002183
Keith Packardf01eca22011-09-28 16:48:10 -07002184 if (!is_edp(intel_dp))
2185 return;
2186
Ville Syrjälä773538e82014-09-04 14:54:56 +03002187 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002188
Jesse Barnes453c5422013-03-28 09:55:41 -07002189 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002190 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002191
Jani Nikulabf13e812013-09-06 07:40:05 +03002192 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002193
2194 I915_WRITE(pp_ctrl_reg, pp);
2195 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002196
Ville Syrjälä773538e82014-09-04 14:54:56 +03002197 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002198
Paulo Zanonidce56b32013-12-19 14:29:40 -02002199 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002200 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002201}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002202
Jani Nikula1250d102014-08-12 17:11:39 +03002203/* Disable backlight PP control and backlight PWM. */
2204void intel_edp_backlight_off(struct intel_dp *intel_dp)
2205{
2206 if (!is_edp(intel_dp))
2207 return;
2208
2209 DRM_DEBUG_KMS("\n");
2210
2211 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002212 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002213}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002214
Jani Nikula73580fb72014-08-12 17:11:41 +03002215/*
2216 * Hook for controlling the panel power control backlight through the bl_power
2217 * sysfs attribute. Take care to handle multiple calls.
2218 */
2219static void intel_edp_backlight_power(struct intel_connector *connector,
2220 bool enable)
2221{
2222 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002223 bool is_enabled;
2224
Ville Syrjälä773538e82014-09-04 14:54:56 +03002225 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002226 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002227 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002228
2229 if (is_enabled == enable)
2230 return;
2231
Jani Nikula23ba9372014-08-27 14:08:43 +03002232 DRM_DEBUG_KMS("panel power control backlight %s\n",
2233 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002234
2235 if (enable)
2236 _intel_edp_backlight_on(intel_dp);
2237 else
2238 _intel_edp_backlight_off(intel_dp);
2239}
2240
Ville Syrjälä64e10772015-10-29 21:26:01 +02002241static const char *state_string(bool enabled)
2242{
2243 return enabled ? "on" : "off";
2244}
2245
2246static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2247{
2248 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2249 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2250 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2251
2252 I915_STATE_WARN(cur_state != state,
2253 "DP port %c state assertion failure (expected %s, current %s)\n",
2254 port_name(dig_port->port),
2255 state_string(state), state_string(cur_state));
2256}
2257#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2258
2259static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2260{
2261 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2262
2263 I915_STATE_WARN(cur_state != state,
2264 "eDP PLL state assertion failure (expected %s, current %s)\n",
2265 state_string(state), state_string(cur_state));
2266}
2267#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2268#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2269
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002270static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002271{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002272 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002273 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002275
Ville Syrjälä64e10772015-10-29 21:26:01 +02002276 assert_pipe_disabled(dev_priv, crtc->pipe);
2277 assert_dp_port_disabled(intel_dp);
2278 assert_edp_pll_disabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002279
Ville Syrjäläabfce942015-10-29 21:26:03 +02002280 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2281 crtc->config->port_clock);
2282
2283 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2284
2285 if (crtc->config->port_clock == 162000)
2286 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2287 else
2288 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2289
2290 I915_WRITE(DP_A, intel_dp->DP);
2291 POSTING_READ(DP_A);
2292 udelay(500);
2293
Daniel Vetter07679352012-09-06 22:15:42 +02002294 intel_dp->DP |= DP_PLL_ENABLE;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002295
Daniel Vetter07679352012-09-06 22:15:42 +02002296 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002297 POSTING_READ(DP_A);
2298 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002299}
2300
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002301static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002302{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002304 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002306
Ville Syrjälä64e10772015-10-29 21:26:01 +02002307 assert_pipe_disabled(dev_priv, crtc->pipe);
2308 assert_dp_port_disabled(intel_dp);
2309 assert_edp_pll_enabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002310
Ville Syrjäläabfce942015-10-29 21:26:03 +02002311 DRM_DEBUG_KMS("disabling eDP PLL\n");
2312
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002313 intel_dp->DP &= ~DP_PLL_ENABLE;
Daniel Vetter07679352012-09-06 22:15:42 +02002314
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002315 I915_WRITE(DP_A, intel_dp->DP);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002316 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002317 udelay(200);
2318}
2319
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002320/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002321void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002322{
2323 int ret, i;
2324
2325 /* Should have a valid DPCD by this point */
2326 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2327 return;
2328
2329 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002330 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2331 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002332 } else {
2333 /*
2334	 * When turning on, we need to retry a few times (sleeping 1 ms
2335	 * between attempts) to give the sink time to wake up.
2336 */
2337 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002338 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2339 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002340 if (ret == 1)
2341 break;
2342 msleep(1);
2343 }
2344 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002345
2346 if (ret != 1)
2347 DRM_DEBUG_KMS("failed to %s sink power state\n",
2348 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002349}
2350
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002351static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2352 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002353{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002354 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002355 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002356 struct drm_device *dev = encoder->base.dev;
2357 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002358 enum intel_display_power_domain power_domain;
2359 u32 tmp;
2360
2361 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002362 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002363 return false;
2364
2365 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002366
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002367 if (!(tmp & DP_PORT_EN))
2368 return false;
2369
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002370 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002371 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002372 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002373 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002374
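		/*
		 * On CPT the pipe<->port routing lives in the per-transcoder
		 * DP control registers, so scan the pipes to find the one
		 * driving this port.
		 */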
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002375 for_each_pipe(dev_priv, p) {
2376 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2377 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2378 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002379 return true;
2380 }
2381 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002382
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002383 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002384 i915_mmio_reg_offset(intel_dp->output_reg));
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002385 } else if (IS_CHERRYVIEW(dev)) {
2386 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2387 } else {
2388 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002389 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002390
2391 return true;
2392}
2393
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002394static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002395 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002398 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002399 struct drm_device *dev = encoder->base.dev;
2400 struct drm_i915_private *dev_priv = dev->dev_private;
2401 enum port port = dp_to_dig_port(intel_dp)->port;
2402 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002403 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002404
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002405 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002406
2407 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002408
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002409 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002410 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2411
2412 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002413 flags |= DRM_MODE_FLAG_PHSYNC;
2414 else
2415 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002416
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002417 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002418 flags |= DRM_MODE_FLAG_PVSYNC;
2419 else
2420 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002421 } else {
2422 if (tmp & DP_SYNC_HS_HIGH)
2423 flags |= DRM_MODE_FLAG_PHSYNC;
2424 else
2425 flags |= DRM_MODE_FLAG_NHSYNC;
2426
2427 if (tmp & DP_SYNC_VS_HIGH)
2428 flags |= DRM_MODE_FLAG_PVSYNC;
2429 else
2430 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002431 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002432
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002433 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002434
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002435 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
Wayne Boyer666a4532015-12-09 12:29:35 -08002436 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002437 pipe_config->limited_color_range = true;
2438
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002439 pipe_config->has_dp_encoder = true;
2440
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002441 pipe_config->lane_count =
2442 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2443
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002444 intel_dp_get_m_n(crtc, pipe_config);
2445
Ville Syrjälä18442d02013-09-13 16:00:08 +03002446 if (port == PORT_A) {
Ville Syrjäläb377e0d2015-10-29 21:25:59 +02002447 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002448 pipe_config->port_clock = 162000;
2449 else
2450 pipe_config->port_clock = 270000;
2451 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002452
2453 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2454 &pipe_config->dp_m_n);
2455
2456 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2457 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2458
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002459 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002460
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002461 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2462 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2463 /*
2464 * This is a big fat ugly hack.
2465 *
2466	 * Some machines in UEFI boot mode provide us with a VBT that has 18
2467 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2468 * unknown we fail to light up. Yet the same BIOS boots up with
2469 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2470 * max, not what it tells us to use.
2471 *
2472 * Note: This will still be broken if the eDP panel is not lit
2473 * up by the BIOS, and thus we can't get the mode at module
2474 * load.
2475 */
2476 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2477 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2478 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2479 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002480}
2481
Daniel Vettere8cb4552012-07-01 13:05:48 +02002482static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002483{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002484 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002485 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002486 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2487
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002488 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002489 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002490
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002491 if (HAS_PSR(dev) && !HAS_DDI(dev))
2492 intel_psr_disable(intel_dp);
2493
Daniel Vetter6cb49832012-05-20 17:14:50 +02002494 /* Make sure the panel is off before trying to change the mode. But also
2495 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002496 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002497 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002498 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002499 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002500
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002501 /* disable the port before the pipe on g4x */
2502 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002503 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002504}
2505
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002506static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002507{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002508 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002509 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002510
Ville Syrjälä49277c32014-03-31 18:21:26 +03002511 intel_dp_link_down(intel_dp);
Ville Syrjäläabfce942015-10-29 21:26:03 +02002512
2513 /* Only ilk+ has port A */
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002514 if (port == PORT_A)
2515 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002516}
2517
2518static void vlv_post_disable_dp(struct intel_encoder *encoder)
2519{
2520 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2521
2522 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002523}
2524
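/*
 * Assert (reset == true) or deassert the PCS/TX data lane soft resets for
 * this channel; the second lane pair is only touched when more than two
 * lanes are in use.
 */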
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002525static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2526 bool reset)
2527{
2528 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2529 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2530 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2531 enum pipe pipe = crtc->pipe;
2532 uint32_t val;
2533
2534 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2535 if (reset)
2536 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2537 else
2538 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2539 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2540
2541 if (crtc->config->lane_count > 2) {
2542 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2543 if (reset)
2544 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2545 else
2546 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2547 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2548 }
2549
2550 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2551 val |= CHV_PCS_REQ_SOFTRESET_EN;
2552 if (reset)
2553 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2554 else
2555 val |= DPIO_PCS_CLK_SOFT_RESET;
2556 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2557
2558 if (crtc->config->lane_count > 2) {
2559 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2560 val |= CHV_PCS_REQ_SOFTRESET_EN;
2561 if (reset)
2562 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2563 else
2564 val |= DPIO_PCS_CLK_SOFT_RESET;
2565 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2566 }
2567}
2568
Ville Syrjälä580d3812014-04-09 13:29:00 +03002569static void chv_post_disable_dp(struct intel_encoder *encoder)
2570{
2571 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002572 struct drm_device *dev = encoder->base.dev;
2573 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002574
2575 intel_dp_link_down(intel_dp);
2576
Ville Syrjäläa5805162015-05-26 20:42:30 +03002577 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002578
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002579 /* Assert data lane reset */
2580 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002581
Ville Syrjäläa5805162015-05-26 20:42:30 +03002582 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002583}
2584
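/*
 * Encode the requested link training pattern into the port register value.
 * The register layout differs between DDI platforms (DP_TP_CTL), gen7 port A
 * and CPT PCH ports (*_CPT variants), and the g4x/VLV/CHV style DP port
 * register handled in the final branch.
 */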
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002585static void
2586_intel_dp_set_link_train(struct intel_dp *intel_dp,
2587 uint32_t *DP,
2588 uint8_t dp_train_pat)
2589{
2590 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2591 struct drm_device *dev = intel_dig_port->base.base.dev;
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593 enum port port = intel_dig_port->port;
2594
2595 if (HAS_DDI(dev)) {
2596 uint32_t temp = I915_READ(DP_TP_CTL(port));
2597
2598 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2599 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2600 else
2601 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2602
2603 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2604 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2605 case DP_TRAINING_PATTERN_DISABLE:
2606 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2607
2608 break;
2609 case DP_TRAINING_PATTERN_1:
2610 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2611 break;
2612 case DP_TRAINING_PATTERN_2:
2613 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2614 break;
2615 case DP_TRAINING_PATTERN_3:
2616 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2617 break;
2618 }
2619 I915_WRITE(DP_TP_CTL(port), temp);
2620
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002621 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2622 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002623 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2624
2625 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2626 case DP_TRAINING_PATTERN_DISABLE:
2627 *DP |= DP_LINK_TRAIN_OFF_CPT;
2628 break;
2629 case DP_TRAINING_PATTERN_1:
2630 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2631 break;
2632 case DP_TRAINING_PATTERN_2:
2633 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2634 break;
2635 case DP_TRAINING_PATTERN_3:
2636 DRM_ERROR("DP training pattern 3 not supported\n");
2637 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2638 break;
2639 }
2640
2641 } else {
2642 if (IS_CHERRYVIEW(dev))
2643 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2644 else
2645 *DP &= ~DP_LINK_TRAIN_MASK;
2646
2647 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2648 case DP_TRAINING_PATTERN_DISABLE:
2649 *DP |= DP_LINK_TRAIN_OFF;
2650 break;
2651 case DP_TRAINING_PATTERN_1:
2652 *DP |= DP_LINK_TRAIN_PAT_1;
2653 break;
2654 case DP_TRAINING_PATTERN_2:
2655 *DP |= DP_LINK_TRAIN_PAT_2;
2656 break;
2657 case DP_TRAINING_PATTERN_3:
2658 if (IS_CHERRYVIEW(dev)) {
2659 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2660 } else {
2661 DRM_ERROR("DP training pattern 3 not supported\n");
2662 *DP |= DP_LINK_TRAIN_PAT_2;
2663 }
2664 break;
2665 }
2666 }
2667}
2668
2669static void intel_dp_enable_port(struct intel_dp *intel_dp)
2670{
2671 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2672 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002673 struct intel_crtc *crtc =
2674 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002675
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002676 /* enable with pattern 1 (as per spec) */
2677 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2678 DP_TRAINING_PATTERN_1);
2679
2680 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2681 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002682
2683 /*
2684 * Magic for VLV/CHV. We _must_ first set up the register
2685 * without actually enabling the port, and then do another
2686 * write to enable the port. Otherwise link training will
2687 * fail when the power sequencer is freshly used for this port.
2688 */
2689 intel_dp->DP |= DP_PORT_EN;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002690 if (crtc->config->has_audio)
2691 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002692
2693 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2694 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002695}
2696
Daniel Vettere8cb4552012-07-01 13:05:48 +02002697static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002698{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002699 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002701 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002702 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002703 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002704 enum port port = dp_to_dig_port(intel_dp)->port;
2705 enum pipe pipe = crtc->pipe;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002706
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002707 if (WARN_ON(dp_reg & DP_PORT_EN))
2708 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002709
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002710 pps_lock(intel_dp);
2711
Wayne Boyer666a4532015-12-09 12:29:35 -08002712 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002713 vlv_init_panel_power_sequencer(intel_dp);
2714
Ville Syrjälä78645782015-11-20 22:09:19 +02002715 /*
2716 * We get an occasional spurious underrun between the port
2717 * enable and vdd enable, when enabling port A eDP.
2718 *
2719 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2720 */
2721 if (port == PORT_A)
2722 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2723
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002724 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002725
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002726 if (port == PORT_A && IS_GEN5(dev_priv)) {
2727 /*
2728 * Underrun reporting for the other pipe was disabled in
2729 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2730 * enabled, so it's now safe to re-enable underrun reporting.
2731 */
2732 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2733 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2734 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2735 }
2736
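	/* eDP power-up sequence: force vdd on, switch the panel on, then drop the vdd override. */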
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002737 edp_panel_vdd_on(intel_dp);
2738 edp_panel_on(intel_dp);
2739 edp_panel_vdd_off(intel_dp, true);
2740
Ville Syrjälä78645782015-11-20 22:09:19 +02002741 if (port == PORT_A)
2742 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2743
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002744 pps_unlock(intel_dp);
2745
Wayne Boyer666a4532015-12-09 12:29:35 -08002746 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002747 unsigned int lane_mask = 0x0;
2748
2749 if (IS_CHERRYVIEW(dev))
2750 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2751
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002752 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2753 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002754 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002755
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002756 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2757 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002758 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002759
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002760 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002761 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002762 pipe_name(pipe));
Jani Nikulac1dec792014-10-27 16:26:56 +02002763 intel_audio_codec_enable(encoder);
2764 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002765}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002766
Jani Nikulaecff4f32013-09-06 07:38:29 +03002767static void g4x_enable_dp(struct intel_encoder *encoder)
2768{
Jani Nikula828f5c62013-09-05 16:44:45 +03002769 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2770
Jani Nikulaecff4f32013-09-06 07:38:29 +03002771 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002772 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002773}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002774
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002775static void vlv_enable_dp(struct intel_encoder *encoder)
2776{
Jani Nikula828f5c62013-09-05 16:44:45 +03002777 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2778
Daniel Vetter4be73782014-01-17 14:39:48 +01002779 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002780 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002781}
2782
Jani Nikulaecff4f32013-09-06 07:38:29 +03002783static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002784{
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002785 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002787 enum port port = dp_to_dig_port(intel_dp)->port;
2788 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002789
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002790 intel_dp_prepare(encoder);
2791
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002792 if (port == PORT_A && IS_GEN5(dev_priv)) {
2793 /*
2794 * We get FIFO underruns on the other pipe when
2795 * enabling the CPU eDP PLL, and when enabling CPU
2796	 * eDP PLL, and when enabling the CPU eDP port. We could potentially
2797 * underrun with a vblank wait just prior to enabling
2798 * the PLL, but that doesn't appear to help the port
2799 * enable case. Just sweep it all under the rug.
2800 */
2801 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2802 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2803 }
2804
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002805 /* Only ilk+ has port A */
Ville Syrjäläabfce942015-10-29 21:26:03 +02002806 if (port == PORT_A)
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002807 ironlake_edp_pll_on(intel_dp);
2808}
2809
Ville Syrjälä83b84592014-10-16 21:29:51 +03002810static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2811{
2812 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2813 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2814 enum pipe pipe = intel_dp->pps_pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002815 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
Ville Syrjälä83b84592014-10-16 21:29:51 +03002816
2817 edp_panel_vdd_off_sync(intel_dp);
2818
2819 /*
2820	 * VLV seems to get confused when multiple power sequencers
2821	 * have the same port selected (even if only one has power/vdd
2822	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2823	 * CHV, on the other hand, doesn't seem to mind having the same port
2824	 * selected in multiple power sequencers, but let's always clear the
2825	 * port select when logically disconnecting a power sequencer
2826 * from a port.
2827 */
2828 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2829 pipe_name(pipe), port_name(intel_dig_port->port));
2830 I915_WRITE(pp_on_reg, 0);
2831 POSTING_READ(pp_on_reg);
2832
2833 intel_dp->pps_pipe = INVALID_PIPE;
2834}
2835
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002836static void vlv_steal_power_sequencer(struct drm_device *dev,
2837 enum pipe pipe)
2838{
2839 struct drm_i915_private *dev_priv = dev->dev_private;
2840 struct intel_encoder *encoder;
2841
2842 lockdep_assert_held(&dev_priv->pps_mutex);
2843
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002844 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2845 return;
2846
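	/* Walk all eDP encoders and detach any that currently claim this pipe's power sequencer. */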
Jani Nikula19c80542015-12-16 12:48:16 +02002847 for_each_intel_encoder(dev, encoder) {
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002848 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002849 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002850
2851 if (encoder->type != INTEL_OUTPUT_EDP)
2852 continue;
2853
2854 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002855 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002856
2857 if (intel_dp->pps_pipe != pipe)
2858 continue;
2859
2860 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002861 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002862
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002863 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002864 "stealing pipe %c power sequencer from active eDP port %c\n",
2865 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002866
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002867 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002868 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002869 }
2870}
2871
2872static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2873{
2874 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2875 struct intel_encoder *encoder = &intel_dig_port->base;
2876 struct drm_device *dev = encoder->base.dev;
2877 struct drm_i915_private *dev_priv = dev->dev_private;
2878 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002879
2880 lockdep_assert_held(&dev_priv->pps_mutex);
2881
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002882 if (!is_edp(intel_dp))
2883 return;
2884
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002885 if (intel_dp->pps_pipe == crtc->pipe)
2886 return;
2887
2888 /*
2889 * If another power sequencer was being used on this
2890	 * port previously, make sure to turn off vdd there while
2891 * we still have control of it.
2892 */
2893 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002894 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002895
2896 /*
2897 * We may be stealing the power
2898 * sequencer from another port.
2899 */
2900 vlv_steal_power_sequencer(dev, crtc->pipe);
2901
2902 /* now it's all ours */
2903 intel_dp->pps_pipe = crtc->pipe;
2904
2905 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2906 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2907
2908 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002909 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2910 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002911}
2912
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002913static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2914{
2915 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2916 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002917 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002918 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002919 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002920 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002921 int pipe = intel_crtc->pipe;
2922 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002923
Ville Syrjäläa5805162015-05-26 20:42:30 +03002924 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002925
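	/*
	 * Program the PCS registers; bit 21 of DW8 is set when not driving
	 * pipe A. The remaining constants are opaque values, presumably from
	 * the recommended VLV DPIO programming sequence.
	 */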
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002926 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002927 val = 0;
2928 if (pipe)
2929 val |= (1<<21);
2930 else
2931 val &= ~(1<<21);
2932 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002933 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2934 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2935 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002936
Ville Syrjäläa5805162015-05-26 20:42:30 +03002937 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002938
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002939 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002940}
2941
Jani Nikulaecff4f32013-09-06 07:38:29 +03002942static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002943{
2944 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2945 struct drm_device *dev = encoder->base.dev;
2946 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002947 struct intel_crtc *intel_crtc =
2948 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002949 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002950 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002951
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002952 intel_dp_prepare(encoder);
2953
Jesse Barnes89b667f2013-04-18 14:51:36 -07002954 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002955 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002956 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002957 DPIO_PCS_TX_LANE2_RESET |
2958 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002959 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002960 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2961 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2962 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2963 DPIO_PCS_CLK_SOFT_RESET);
2964
2965 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002966 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2967 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2968 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002969 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002970}
2971
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002972static void chv_pre_enable_dp(struct intel_encoder *encoder)
2973{
2974 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2975 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2976 struct drm_device *dev = encoder->base.dev;
2977 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002978 struct intel_crtc *intel_crtc =
2979 to_intel_crtc(encoder->base.crtc);
2980 enum dpio_channel ch = vlv_dport_to_channel(dport);
2981 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002982 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002983 u32 val;
2984
Ville Syrjäläa5805162015-05-26 20:42:30 +03002985 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002986
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002987 /* allow hardware to manage TX FIFO reset source */
2988 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2989 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2991
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002992 if (intel_crtc->config->lane_count > 2) {
2993 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2994 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2995 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2996 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002997
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002998	 /* Program Tx lane latency optimal setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002999 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003000 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003001 if (intel_crtc->config->lane_count == 1)
3002 data = 0x0;
3003 else
3004 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003005 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3006 data << DPIO_UPAR_SHIFT);
3007 }
3008
3009 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03003010 if (intel_crtc->config->port_clock > 270000)
3011 stagger = 0x18;
3012 else if (intel_crtc->config->port_clock > 135000)
3013 stagger = 0xd;
3014 else if (intel_crtc->config->port_clock > 67500)
3015 stagger = 0x7;
3016 else if (intel_crtc->config->port_clock > 33750)
3017 stagger = 0x4;
3018 else
3019 stagger = 0x2;
3020
3021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3022 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3023 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3024
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003025 if (intel_crtc->config->lane_count > 2) {
3026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3027 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3028 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3029 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03003030
3031 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3032 DPIO_LANESTAGGER_STRAP(stagger) |
3033 DPIO_LANESTAGGER_STRAP_OVRD |
3034 DPIO_TX1_STAGGER_MASK(0x1f) |
3035 DPIO_TX1_STAGGER_MULT(6) |
3036 DPIO_TX2_STAGGER_MULT(0));
3037
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003038 if (intel_crtc->config->lane_count > 2) {
3039 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3040 DPIO_LANESTAGGER_STRAP(stagger) |
3041 DPIO_LANESTAGGER_STRAP_OVRD |
3042 DPIO_TX1_STAGGER_MASK(0x1f) |
3043 DPIO_TX1_STAGGER_MULT(7) |
3044 DPIO_TX2_STAGGER_MULT(5));
3045 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003046
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03003047 /* Deassert data lane reset */
3048 chv_data_lane_soft_reset(encoder, false);
3049
Ville Syrjäläa5805162015-05-26 20:42:30 +03003050 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003051
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003052 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003053
3054 /* Second common lane will stay alive on its own now */
3055 if (dport->release_cl2_override) {
3056 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3057 dport->release_cl2_override = false;
3058 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003059}
3060
Ville Syrjälä9197c882014-04-09 13:29:05 +03003061static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3062{
3063 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3064 struct drm_device *dev = encoder->base.dev;
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3066 struct intel_crtc *intel_crtc =
3067 to_intel_crtc(encoder->base.crtc);
3068 enum dpio_channel ch = vlv_dport_to_channel(dport);
3069 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003070 unsigned int lane_mask =
3071 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003072 u32 val;
3073
Ville Syrjälä625695f2014-06-28 02:04:02 +03003074 intel_dp_prepare(encoder);
3075
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003076 /*
3077 * Must trick the second common lane into life.
3078 * Otherwise we can't even access the PLL.
3079 */
3080 if (ch == DPIO_CH0 && pipe == PIPE_B)
3081 dport->release_cl2_override =
3082 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3083
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003084 chv_phy_powergate_lanes(encoder, true, lane_mask);
3085
Ville Syrjäläa5805162015-05-26 20:42:30 +03003086 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003087
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03003088 /* Assert data lane reset */
3089 chv_data_lane_soft_reset(encoder, true);
3090
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03003091 /* program left/right clock distribution */
3092 if (pipe != PIPE_B) {
3093 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3094 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3095 if (ch == DPIO_CH0)
3096 val |= CHV_BUFLEFTENA1_FORCE;
3097 if (ch == DPIO_CH1)
3098 val |= CHV_BUFRIGHTENA1_FORCE;
3099 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3100 } else {
3101 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3102 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3103 if (ch == DPIO_CH0)
3104 val |= CHV_BUFLEFTENA2_FORCE;
3105 if (ch == DPIO_CH1)
3106 val |= CHV_BUFRIGHTENA2_FORCE;
3107 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3108 }
3109
Ville Syrjälä9197c882014-04-09 13:29:05 +03003110 /* program clock channel usage */
3111 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3112 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3113 if (pipe != PIPE_B)
3114 val &= ~CHV_PCS_USEDCLKCHANNEL;
3115 else
3116 val |= CHV_PCS_USEDCLKCHANNEL;
3117 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3118
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003119 if (intel_crtc->config->lane_count > 2) {
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3121 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3122 if (pipe != PIPE_B)
3123 val &= ~CHV_PCS_USEDCLKCHANNEL;
3124 else
3125 val |= CHV_PCS_USEDCLKCHANNEL;
3126 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3127 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03003128
3129 /*
3130	 * This is a bit weird since generally CL
3131 * matches the pipe, but here we need to
3132 * pick the CL based on the port.
3133 */
3134 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3135 if (pipe != PIPE_B)
3136 val &= ~CHV_CMN_USEDCLKCHANNEL;
3137 else
3138 val |= CHV_CMN_USEDCLKCHANNEL;
3139 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3140
Ville Syrjäläa5805162015-05-26 20:42:30 +03003141 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003142}
3143
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003144static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3145{
3146 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3147 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3148 u32 val;
3149
3150 mutex_lock(&dev_priv->sb_lock);
3151
3152 /* disable left/right clock distribution */
3153 if (pipe != PIPE_B) {
3154 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3155 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3156 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3157 } else {
3158 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3159 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3160 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3161 }
3162
3163 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003164
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003165 /*
3166 * Leave the power down bit cleared for at least one
3167	 * lane so that chv_phy_powergate_ch() will power
3168 * on something when the channel is otherwise unused.
3169 * When the port is off and the override is removed
3170 * the lanes power down anyway, so otherwise it doesn't
3171	 * really matter what the state of the power down bits is
3172 * after this.
3173 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003174 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003175}
3176
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003177/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003178 * Native read with retry for link status and receiver capability reads for
3179 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003180 *
3181 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3182 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003183 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003184static ssize_t
3185intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3186 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003187{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003188 ssize_t ret;
3189 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003190
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003191 /*
3192	 * Sometimes we just get the same incorrect byte repeated
3193	 * over the entire buffer. Doing just one throw-away read
3194 * initially seems to "solve" it.
3195 */
3196 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3197
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003198 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003199 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3200 if (ret == size)
3201 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003202 msleep(1);
3203 }
3204
Jani Nikula9d1a1032014-03-14 16:51:15 +02003205 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003206}
3207
3208/*
3209 * Fetch AUX CH registers 0x202 - 0x207 which contain
3210 * link status information
3211 */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003212bool
Keith Packard93f62da2011-11-01 19:45:03 -07003213intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003214{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003215 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3216 DP_LANE0_1_STATUS,
3217 link_status,
3218 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003219}
3220
Paulo Zanoni11002442014-06-13 18:45:41 -03003221/* These are source-specific values. */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003222uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003223intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003224{
Paulo Zanoni30add222012-10-26 19:05:45 -02003225 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303226 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003227 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003228
Vandana Kannan93147262014-11-18 15:45:29 +05303229 if (IS_BROXTON(dev))
3230 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3231 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303232 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303233 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003234 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Wayne Boyer666a4532015-12-09 12:29:35 -08003235 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303236 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003237 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303238 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003239 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303240 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003241 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003243}
3244
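/*
 * Maximum pre-emphasis the source can drive for a given voltage swing;
 * like the voltage swing limit above, this is source/platform specific.
 */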
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003245uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003246intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3247{
Paulo Zanoni30add222012-10-26 19:05:45 -02003248 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003249 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003250
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003251 if (INTEL_INFO(dev)->gen >= 9) {
3252 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3254 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3256 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3258 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3260 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003261 default:
3262 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3263 }
3264 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003265 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3271 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003273 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303274 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003275 }
Wayne Boyer666a4532015-12-09 12:29:35 -08003276 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003277 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003285 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303286 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003287 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003288 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003289 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3294 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003295 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303296 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003297 }
3298 } else {
3299 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3301 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3305 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003307 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303308 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003309 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003310 }
3311}
3312
Daniel Vetter5829975c2015-04-16 11:36:52 +02003313static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003314{
3315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3316 struct drm_i915_private *dev_priv = dev->dev_private;
3317 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003318 struct intel_crtc *intel_crtc =
3319 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003320 unsigned long demph_reg_value, preemph_reg_value,
3321 uniqtranscale_reg_value;
3322 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003323 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003324 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003325
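	/*
	 * Translate the requested pre-emphasis/voltage swing combination into
	 * VLV DPIO demph, pre-emphasis and uniqtranscale register values.
	 */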
3326 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303327 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003328 preemph_reg_value = 0x0004000;
3329 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003331 demph_reg_value = 0x2B405555;
3332 uniqtranscale_reg_value = 0x552AB83A;
3333 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003335 demph_reg_value = 0x2B404040;
3336 uniqtranscale_reg_value = 0x5548B83A;
3337 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003339 demph_reg_value = 0x2B245555;
3340 uniqtranscale_reg_value = 0x5560B83A;
3341 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003343 demph_reg_value = 0x2B405555;
3344 uniqtranscale_reg_value = 0x5598DA3A;
3345 break;
3346 default:
3347 return 0;
3348 }
3349 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303350 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003351 preemph_reg_value = 0x0002000;
3352 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003354 demph_reg_value = 0x2B404040;
3355 uniqtranscale_reg_value = 0x5552B83A;
3356 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003358 demph_reg_value = 0x2B404848;
3359 uniqtranscale_reg_value = 0x5580B83A;
3360 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003362 demph_reg_value = 0x2B404040;
3363 uniqtranscale_reg_value = 0x55ADDA3A;
3364 break;
3365 default:
3366 return 0;
3367 }
3368 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303369 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003370 preemph_reg_value = 0x0000000;
3371 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003373 demph_reg_value = 0x2B305555;
3374 uniqtranscale_reg_value = 0x5570B83A;
3375 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003377 demph_reg_value = 0x2B2B4040;
3378 uniqtranscale_reg_value = 0x55ADDA3A;
3379 break;
3380 default:
3381 return 0;
3382 }
3383 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303384 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003385 preemph_reg_value = 0x0006000;
3386 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003388 demph_reg_value = 0x1B405555;
3389 uniqtranscale_reg_value = 0x55ADDA3A;
3390 break;
3391 default:
3392 return 0;
3393 }
3394 break;
3395 default:
3396 return 0;
3397 }
3398
Ville Syrjäläa5805162015-05-26 20:42:30 +03003399 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003400 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3401 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3402 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003403 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003404 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3405 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3406 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3407 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003408 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003409
3410 return 0;
3411}
3412
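/*
 * The unique transition scale only needs to be programmed for the maximum
 * voltage swing (level 3) combined with no pre-emphasis (level 0).
 */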
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003413static bool chv_need_uniq_trans_scale(uint8_t train_set)
3414{
3415 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3416 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3417}
3418
Daniel Vetter5829975c2015-04-16 11:36:52 +02003419static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003420{
3421 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3422 struct drm_i915_private *dev_priv = dev->dev_private;
3423 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3424 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003425 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003426 uint8_t train_set = intel_dp->train_set[0];
3427 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003428 enum pipe pipe = intel_crtc->pipe;
3429 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003430
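	/*
	 * Translate the requested pre-emphasis/voltage swing combination into
	 * CHV PHY de-emphasis and swing margin register values.
	 */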
3431 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303432 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003433 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003435 deemph_reg_value = 128;
3436 margin_reg_value = 52;
3437 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003439 deemph_reg_value = 128;
3440 margin_reg_value = 77;
3441 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003443 deemph_reg_value = 128;
3444 margin_reg_value = 102;
3445 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003447 deemph_reg_value = 128;
3448 margin_reg_value = 154;
3449 /* FIXME extra to set for 1200 */
3450 break;
3451 default:
3452 return 0;
3453 }
3454 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303455 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003456 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003458 deemph_reg_value = 85;
3459 margin_reg_value = 78;
3460 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003462 deemph_reg_value = 85;
3463 margin_reg_value = 116;
3464 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003466 deemph_reg_value = 85;
3467 margin_reg_value = 154;
3468 break;
3469 default:
3470 return 0;
3471 }
3472 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303473 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003474 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003476 deemph_reg_value = 64;
3477 margin_reg_value = 104;
3478 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003480 deemph_reg_value = 64;
3481 margin_reg_value = 154;
3482 break;
3483 default:
3484 return 0;
3485 }
3486 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303487 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003488 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003490 deemph_reg_value = 43;
3491 margin_reg_value = 154;
3492 break;
3493 default:
3494 return 0;
3495 }
3496 break;
3497 default:
3498 return 0;
3499 }
3500
Ville Syrjäläa5805162015-05-26 20:42:30 +03003501 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003502
3503 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003504 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3505 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003506 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3507 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003508 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3509
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003510 if (intel_crtc->config->lane_count > 2) {
3511 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3512 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3513 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3514 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3515 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3516 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003517
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003518 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3519 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3520 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3521 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3522
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003523 if (intel_crtc->config->lane_count > 2) {
3524 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3525 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3526 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3527 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3528 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003529
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003530 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003531 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003532 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3533 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3534 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3535 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3536 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003537
3538 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003539 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003540 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003541
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003542 val &= ~DPIO_SWING_MARGIN000_MASK;
3543 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003544
3545 /*
3546 * Supposedly this value shouldn't matter when unique transition
3547 * scale is disabled, but in fact it does matter. Let's just
3548 * always program the same value and hope it's OK.
3549 */
3550 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3551 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3552
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003553 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3554 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003555
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003556 /*
 3557	 * The documentation says to set bit 27 for ch0 and bit 26 for ch1,
 3558	 * which might be a typo in the doc.
 3559	 * For now, set bit 27 for both ch0 and ch1 when selecting the
 3560	 * unique transition scale.
3561 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003562 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003563 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003564 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003565 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003566 else
3567 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3568 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003569 }
3570
3571 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003572 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3573 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3574 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3575
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003576 if (intel_crtc->config->lane_count > 2) {
3577 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3578 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3579 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3580 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003581
Ville Syrjäläa5805162015-05-26 20:42:30 +03003582 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003583
3584 return 0;
3585}
3586
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003587static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003588gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003589{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003590 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003591
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003592 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303593 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003594 default:
3595 signal_levels |= DP_VOLTAGE_0_4;
3596 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303597 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003598 signal_levels |= DP_VOLTAGE_0_6;
3599 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003601 signal_levels |= DP_VOLTAGE_0_8;
3602 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003604 signal_levels |= DP_VOLTAGE_1_2;
3605 break;
3606 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003607 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303608 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003609 default:
3610 signal_levels |= DP_PRE_EMPHASIS_0;
3611 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303612 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003613 signal_levels |= DP_PRE_EMPHASIS_3_5;
3614 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303615 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003616 signal_levels |= DP_PRE_EMPHASIS_6;
3617 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303618 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003619 signal_levels |= DP_PRE_EMPHASIS_9_5;
3620 break;
3621 }
3622 return signal_levels;
3623}
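
/*
 * Note: gen4 encodes voltage swing and pre-emphasis as two independent
 * bitfields, whereas the gen6/gen7 eDP helpers below map each supported
 * swing/pre-emphasis combination to a single EDP_LINK_TRAIN_* value.
 */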
3624
Zhenyu Wange3421a12010-04-08 09:43:27 +08003625/* Gen6's DP voltage swing and pre-emphasis control */
3626static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003627gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003628{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003629 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3630 DP_TRAIN_PRE_EMPHASIS_MASK);
3631 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303632 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3633 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003634 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303635 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003636 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3638 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003639 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003642 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003645 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003646 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003647		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3648			      "0x%x\n", signal_levels);
3649 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003650 }
3651}
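
/*
 * Worked example (illustrative): a train_set of
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1 hits the
 * combined 600/800 mV case above and yields
 * EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B.
 */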
3652
Keith Packard1a2eb462011-11-16 16:26:07 -08003653/* Gen7's DP voltage swing and pre-emphasis control */
3654static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003655gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003656{
3657 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3658 DP_TRAIN_PRE_EMPHASIS_MASK);
3659 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003661 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303662 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003663 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303664 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003665 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3666
Sonika Jindalbd600182014-08-08 16:23:41 +05303667 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003668 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003670 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3671
Sonika Jindalbd600182014-08-08 16:23:41 +05303672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003673 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003675 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3676
3677 default:
 3678		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
 3679			      "0x%x\n", signal_levels);
3680 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3681 }
3682}
3683
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003684void
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003685intel_dp_set_signal_levels(struct intel_dp *intel_dp)
Paulo Zanonif0a34242012-12-06 16:51:50 -02003686{
3687 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003688 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003689 struct drm_device *dev = intel_dig_port->base.base.dev;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003690 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehallf8896f52015-06-25 11:11:03 +03003691 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003692 uint8_t train_set = intel_dp->train_set[0];
3693
David Weinehallf8896f52015-06-25 11:11:03 +03003694 if (HAS_DDI(dev)) {
3695 signal_levels = ddi_signal_levels(intel_dp);
3696
3697 if (IS_BROXTON(dev))
3698 signal_levels = 0;
3699 else
3700 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003701 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003702 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003703 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003704 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003705 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003706 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003707 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003708 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003709 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003710 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3711 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003712 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003713 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3714 }
3715
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303716 if (mask)
3717 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3718
3719 DRM_DEBUG_KMS("Using vswing level %d\n",
3720 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3721 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3722 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3723 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003724
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003725 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003726
3727 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3728 POSTING_READ(intel_dp->output_reg);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003729}
3730
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003731void
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003732intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3733 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003734{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003735 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003736 struct drm_i915_private *dev_priv =
3737 to_i915(intel_dig_port->base.base.dev);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003738
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003739 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003740
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003741 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003742 POSTING_READ(intel_dp->output_reg);
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003743}
3744
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003745void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
Imre Deak3ab9c632013-05-03 12:57:41 +03003746{
3747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3748 struct drm_device *dev = intel_dig_port->base.base.dev;
3749 struct drm_i915_private *dev_priv = dev->dev_private;
3750 enum port port = intel_dig_port->port;
3751 uint32_t val;
3752
3753 if (!HAS_DDI(dev))
3754 return;
3755
3756 val = I915_READ(DP_TP_CTL(port));
3757 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3758 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3759 I915_WRITE(DP_TP_CTL(port), val);
3760
3761 /*
 3762	 * On PORT_A we can only have eDP in SST mode. There, the only reason
 3763	 * we need to set idle transmission mode is to work around a HW issue
 3764	 * where we enable the pipe while not in idle link-training mode.
 3765	 * In that case there is a requirement to wait for a minimum number
 3766	 * of idle patterns to be sent.
3767 */
3768 if (port == PORT_A)
3769 return;
3770
3771 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3772 1))
3773 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3774}
3775
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003776static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003777intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003778{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003779 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003780 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003781 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003782 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003783 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003784 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003785
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003786 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003787 return;
3788
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003789 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003790 return;
3791
Zhao Yakui28c97732009-10-09 11:39:41 +08003792 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003793
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003794 if ((IS_GEN7(dev) && port == PORT_A) ||
3795 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003796 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003797 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003798 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003799 if (IS_CHERRYVIEW(dev))
3800 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3801 else
3802 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003803 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003804 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003805 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003806 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003807
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003808 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3809 I915_WRITE(intel_dp->output_reg, DP);
3810 POSTING_READ(intel_dp->output_reg);
3811
3812 /*
3813 * HW workaround for IBX, we need to move the port
3814 * to transcoder A after disabling it to allow the
3815 * matching HDMI port to be enabled on transcoder A.
3816 */
3817 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003818 /*
3819 * We get CPU/PCH FIFO underruns on the other pipe when
3820 * doing the workaround. Sweep them under the rug.
3821 */
3822 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3823 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3824
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003825 /* always enable with pattern 1 (as per spec) */
3826 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3827 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3828 I915_WRITE(intel_dp->output_reg, DP);
3829 POSTING_READ(intel_dp->output_reg);
3830
3831 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003832 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003833 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003834
3835 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3836 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3837 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
Eric Anholt5bddd172010-11-18 09:32:59 +08003838 }
3839
Keith Packardf01eca22011-09-28 16:48:10 -07003840 msleep(intel_dp->panel_power_down_delay);
Ville Syrjälä6fec7662015-11-10 16:16:17 +02003841
3842 intel_dp->DP = DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003843}
3844
Keith Packard26d61aa2011-07-25 20:01:09 -07003845static bool
3846intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003847{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003848 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3849 struct drm_device *dev = dig_port->base.base.dev;
3850 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303851 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003852
Jani Nikula9d1a1032014-03-14 16:51:15 +02003853 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3854 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003855 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003856
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003857 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003858
Adam Jacksonedb39242012-09-18 10:58:49 -04003859 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3860 return false; /* DPCD not present */
3861
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003862 /* Check if the panel supports PSR */
3863 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003864 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003865 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3866 intel_dp->psr_dpcd,
3867 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003868 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3869 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003870 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003871 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303872
3873 if (INTEL_INFO(dev)->gen >= 9 &&
3874 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3875 uint8_t frame_sync_cap;
3876
3877 dev_priv->psr.sink_support = true;
3878 intel_dp_dpcd_read_wake(&intel_dp->aux,
3879 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3880 &frame_sync_cap, 1);
3881 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3882 /* PSR2 needs frame sync as well */
3883 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
 3884			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3885 dev_priv->psr.psr2_support ? "supported" : "not supported");
3886 }
Jani Nikula50003932013-09-20 16:42:17 +03003887 }
3888
Jani Nikulabc5133d2015-09-03 11:16:07 +03003889 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03003890 yesno(intel_dp_source_supports_hbr2(intel_dp)),
Jani Nikula742f4912015-09-03 11:16:09 +03003891 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07003892
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303893 /* Intermediate frequency support */
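	/*
	 * Worked example (illustrative): a sink advertising HBR2 reports
	 * 27000 (0x6978) in DP_SUPPORTED_LINK_RATES, i.e. 27000 * 200 kHz =
	 * 5.4 GHz, which the conversion below stores as (27000 * 200) / 10
	 * = 540000.
	 */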
3894 if (is_edp(intel_dp) &&
3895 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3896 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 3897	    (rev >= 0x03)) { /* eDP v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003898 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003899 int i;
3900
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303901 intel_dp_dpcd_read_wake(&intel_dp->aux,
3902 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003903 sink_rates,
3904 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003905
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003906 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3907 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003908
3909 if (val == 0)
3910 break;
3911
Sonika Jindalaf77b972015-05-07 13:59:28 +05303912			/* Value read is in 200 kHz units; link rates are stored in deca-kHz */
3913 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003914 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003915 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303916 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003917
3918 intel_dp_print_rates(intel_dp);
3919
Adam Jacksonedb39242012-09-18 10:58:49 -04003920 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3921 DP_DWN_STRM_PORT_PRESENT))
3922 return true; /* native DP sink */
3923
3924 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3925 return true; /* no per-port downstream info */
3926
Jani Nikula9d1a1032014-03-14 16:51:15 +02003927 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3928 intel_dp->downstream_ports,
3929 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003930 return false; /* downstream port status fetch failed */
3931
3932 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003933}
3934
Adam Jackson0d198322012-05-14 16:05:47 -04003935static void
3936intel_dp_probe_oui(struct intel_dp *intel_dp)
3937{
3938 u8 buf[3];
3939
3940 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3941 return;
3942
Jani Nikula9d1a1032014-03-14 16:51:15 +02003943 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003944 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3945 buf[0], buf[1], buf[2]);
3946
Jani Nikula9d1a1032014-03-14 16:51:15 +02003947 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003948 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3949 buf[0], buf[1], buf[2]);
3950}
3951
Dave Airlie0e32b392014-05-02 14:02:48 +10003952static bool
3953intel_dp_probe_mst(struct intel_dp *intel_dp)
3954{
3955 u8 buf[1];
3956
3957 if (!intel_dp->can_mst)
3958 return false;
3959
3960 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3961 return false;
3962
Dave Airlie0e32b392014-05-02 14:02:48 +10003963 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3964 if (buf[0] & DP_MST_CAP) {
3965 DRM_DEBUG_KMS("Sink is MST capable\n");
3966 intel_dp->is_mst = true;
3967 } else {
3968 DRM_DEBUG_KMS("Sink is not MST capable\n");
3969 intel_dp->is_mst = false;
3970 }
3971 }
Dave Airlie0e32b392014-05-02 14:02:48 +10003972
3973 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3974 return intel_dp->is_mst;
3975}
3976
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003977static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003978{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003979 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08003980 struct drm_device *dev = dig_port->base.base.dev;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003981 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003982 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003983 int ret = 0;
Rodrigo Vivic6297842015-11-05 10:50:20 -08003984 int count = 0;
3985 int attempts = 10;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003986
3987 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003988 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003989 ret = -EIO;
3990 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003991 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003992
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003993 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003994 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003995 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003996 ret = -EIO;
3997 goto out;
3998 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003999
Rodrigo Vivic6297842015-11-05 10:50:20 -08004000 do {
4001 intel_wait_for_vblank(dev, intel_crtc->pipe);
4002
4003 if (drm_dp_dpcd_readb(&intel_dp->aux,
4004 DP_TEST_SINK_MISC, &buf) < 0) {
4005 ret = -EIO;
4006 goto out;
4007 }
4008 count = buf & DP_TEST_COUNT_MASK;
4009 } while (--attempts && count);
4010
4011 if (attempts == 0) {
4012 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
4013 ret = -ETIMEDOUT;
4014 }
4015
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004016 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004017 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004018 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004019}
4020
4021static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4022{
4023 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08004024 struct drm_device *dev = dig_port->base.base.dev;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004025 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4026 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004027 int ret;
4028
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004029 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4030 return -EIO;
4031
4032 if (!(buf & DP_TEST_CRC_SUPPORTED))
4033 return -ENOTTY;
4034
4035 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4036 return -EIO;
4037
Rodrigo Vivi6d8175d2015-11-05 10:50:22 -08004038 if (buf & DP_TEST_SINK_START) {
4039 ret = intel_dp_sink_crc_stop(intel_dp);
4040 if (ret)
4041 return ret;
4042 }
4043
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004044 hsw_disable_ips(intel_crtc);
4045
4046 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4047 buf | DP_TEST_SINK_START) < 0) {
4048 hsw_enable_ips(intel_crtc);
4049 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004050 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004051
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08004052 intel_wait_for_vblank(dev, intel_crtc->pipe);
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004053 return 0;
4054}
4055
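/*
 * Sink CRC readout sequence: start CRC calculation in the sink, wait up to
 * six vblanks for DP_TEST_SINK_MISC to report a non-zero CRC count, read the
 * six CRC bytes starting at DP_TEST_CRC_R_CR, and stop the calculation again
 * (which also re-enables IPS) regardless of the outcome.
 */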
4056int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4057{
4058 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4059 struct drm_device *dev = dig_port->base.base.dev;
4060 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4061 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004062 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004063 int attempts = 6;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004064
4065 ret = intel_dp_sink_crc_start(intel_dp);
4066 if (ret)
4067 return ret;
4068
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004069 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004070 intel_wait_for_vblank(dev, intel_crtc->pipe);
4071
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004072 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004073 DP_TEST_SINK_MISC, &buf) < 0) {
4074 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004075 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004076 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004077 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004078
Rodrigo Vivi7e38eef2015-11-05 10:50:21 -08004079 } while (--attempts && count == 0);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004080
4081 if (attempts == 0) {
Rodrigo Vivi7e38eef2015-11-05 10:50:21 -08004082 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4083 ret = -ETIMEDOUT;
4084 goto stop;
4085 }
4086
4087 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4088 ret = -EIO;
4089 goto stop;
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004090 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004091
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004092stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004093 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004094 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004095}
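
/*
 * Minimal usage sketch (illustrative only, not part of the driver): reading
 * the six sink CRC bytes for debugging. The helper name is hypothetical and
 * the block is compiled out; it only shows how intel_dp_sink_crc() above is
 * meant to be called.
 */
#if 0
static void example_dump_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x %02x %02x %02x %02x %02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}
#endif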
4096
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004097static bool
4098intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4099{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004100 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4101 DP_DEVICE_SERVICE_IRQ_VECTOR,
4102 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004103}
4104
Dave Airlie0e32b392014-05-02 14:02:48 +10004105static bool
4106intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4107{
4108 int ret;
4109
4110 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_SINK_COUNT_ESI,
4112 sink_irq_vector, 14);
4113 if (ret != 14)
4114 return false;
4115
4116 return true;
4117}
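
/*
 * Note: the 14 byte ESI block read above (starting at DP_SINK_COUNT_ESI) is
 * what intel_dp_check_mst_status() parses below; in particular the link
 * status at esi[10] feeds the channel EQ check.
 */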
4118
Todd Previtec5d5ab72015-04-15 08:38:38 -07004119static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004120{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004121 uint8_t test_result = DP_TEST_ACK;
4122 return test_result;
4123}
4124
4125static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4126{
4127 uint8_t test_result = DP_TEST_NAK;
4128 return test_result;
4129}
4130
4131static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4132{
4133 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004134 struct intel_connector *intel_connector = intel_dp->attached_connector;
4135 struct drm_connector *connector = &intel_connector->base;
4136
4137 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004138 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004139 intel_dp->aux.i2c_defer_count > 6) {
4140 /* Check EDID read for NACKs, DEFERs and corruption
4141 * (DP CTS 1.2 Core r1.1)
4142 * 4.2.2.4 : Failed EDID read, I2C_NAK
4143 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4144 * 4.2.2.6 : EDID corruption detected
4145 * Use failsafe mode for all cases
4146 */
4147 if (intel_dp->aux.i2c_nack_count > 0 ||
4148 intel_dp->aux.i2c_defer_count > 0)
4149 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4150 intel_dp->aux.i2c_nack_count,
4151 intel_dp->aux.i2c_defer_count);
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4153 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304154 struct edid *block = intel_connector->detect_edid;
4155
4156 /* We have to write the checksum
4157 * of the last block read
4158 */
4159 block += intel_connector->detect_edid->extensions;
4160
Todd Previte559be302015-05-04 07:48:20 -07004161 if (!drm_dp_dpcd_write(&intel_dp->aux,
4162 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304163 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004164 1))
Todd Previte559be302015-05-04 07:48:20 -07004165 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4166
4167 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4168 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4169 }
4170
4171 /* Set test active flag here so userspace doesn't interrupt things */
4172 intel_dp->compliance_test_active = 1;
4173
Todd Previtec5d5ab72015-04-15 08:38:38 -07004174 return test_result;
4175}
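
/*
 * Per the EDID test handling above: a clean EDID read is acknowledged by
 * writing the checksum of the last block read to DP_TEST_EDID_CHECKSUM and
 * replying with DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE, while NACKed,
 * deferred or corrupt reads fall back to the failsafe resolution.
 */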
4176
4177static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4178{
4179 uint8_t test_result = DP_TEST_NAK;
4180 return test_result;
4181}
4182
4183static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4184{
4185 uint8_t response = DP_TEST_NAK;
4186 uint8_t rxdata = 0;
4187 int status = 0;
4188
Todd Previtec5d5ab72015-04-15 08:38:38 -07004189 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4190 if (status <= 0) {
4191 DRM_DEBUG_KMS("Could not read test request from sink\n");
4192 goto update_status;
4193 }
4194
4195 switch (rxdata) {
4196 case DP_TEST_LINK_TRAINING:
4197 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4198 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4199 response = intel_dp_autotest_link_training(intel_dp);
4200 break;
4201 case DP_TEST_LINK_VIDEO_PATTERN:
4202 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4203 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4204 response = intel_dp_autotest_video_pattern(intel_dp);
4205 break;
4206 case DP_TEST_LINK_EDID_READ:
4207 DRM_DEBUG_KMS("EDID test requested\n");
4208 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4209 response = intel_dp_autotest_edid(intel_dp);
4210 break;
4211 case DP_TEST_LINK_PHY_TEST_PATTERN:
4212 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4213 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4214 response = intel_dp_autotest_phy_pattern(intel_dp);
4215 break;
4216 default:
4217 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4218 break;
4219 }
4220
4221update_status:
4222 status = drm_dp_dpcd_write(&intel_dp->aux,
4223 DP_TEST_RESPONSE,
4224 &response, 1);
4225 if (status <= 0)
4226 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004227}
4228
Dave Airlie0e32b392014-05-02 14:02:48 +10004229static int
4230intel_dp_check_mst_status(struct intel_dp *intel_dp)
4231{
4232 bool bret;
4233
4234 if (intel_dp->is_mst) {
4235 u8 esi[16] = { 0 };
4236 int ret = 0;
4237 int retry;
4238 bool handled;
4239 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4240go_again:
4241 if (bret == true) {
4242
4243 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004244 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004245 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004246 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4247 intel_dp_start_link_train(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004248 intel_dp_stop_link_train(intel_dp);
4249 }
4250
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004251 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004252 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4253
4254 if (handled) {
4255 for (retry = 0; retry < 3; retry++) {
4256 int wret;
4257 wret = drm_dp_dpcd_write(&intel_dp->aux,
4258 DP_SINK_COUNT_ESI+1,
4259 &esi[1], 3);
4260 if (wret == 3) {
4261 break;
4262 }
4263 }
4264
4265 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4266 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004267 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004268 goto go_again;
4269 }
4270 } else
4271 ret = 0;
4272
4273 return ret;
4274 } else {
4275 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4276 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4277 intel_dp->is_mst = false;
4278 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4279 /* send a hotplug event */
4280 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4281 }
4282 }
4283 return -EINVAL;
4284}
4285
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004286/*
4287 * According to DP spec
4288 * 5.1.2:
4289 * 1. Read DPCD
4290 * 2. Configure link according to Receiver Capabilities
4291 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4292 * 4. Check link status on receipt of hot-plug interrupt
4293 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004294static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004295intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004296{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004297 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004298 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004299 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004300 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004301
Dave Airlie5b215bc2014-08-05 10:40:20 +10004302 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4303
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304304 /*
4305 * Clearing compliance test variables to allow capturing
4306 * of values for next automated test request.
4307 */
4308 intel_dp->compliance_test_active = 0;
4309 intel_dp->compliance_test_type = 0;
4310 intel_dp->compliance_test_data = 0;
4311
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004312 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004313 return;
4314
Imre Deak1a125d82014-08-18 14:42:46 +03004315 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4316 return;
4317
Keith Packard92fd8fd2011-07-25 19:50:10 -07004318 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004319 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004320 return;
4321 }
4322
Keith Packard92fd8fd2011-07-25 19:50:10 -07004323 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004324 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004325 return;
4326 }
4327
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004328 /* Try to read the source of the interrupt */
4329 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4330 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4331 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004332 drm_dp_dpcd_writeb(&intel_dp->aux,
4333 DP_DEVICE_SERVICE_IRQ_VECTOR,
4334 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004335
4336 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004337 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004338 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4339 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4340 }
4341
Shubhangi Shrivastava14631e92015-10-14 14:56:49 +05304342	/* if link training is requested we should always perform it */
4343 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4344 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004345 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004346 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004347 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004348 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004349 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004350}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004351
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004352/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004353static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004354intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004355{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004356 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004357 uint8_t type;
4358
4359 if (!intel_dp_get_dpcd(intel_dp))
4360 return connector_status_disconnected;
4361
4362 /* if there's no downstream port, we're done */
4363 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004364 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004365
4366 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004367 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4368 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004369 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004370
4371 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4372 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004373 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004374
Adam Jackson23235172012-09-20 16:42:45 -04004375 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4376 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004377 }
4378
4379 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004380 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004381 return connector_status_connected;
4382
4383 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004384 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4385 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4386 if (type == DP_DS_PORT_TYPE_VGA ||
4387 type == DP_DS_PORT_TYPE_NON_EDID)
4388 return connector_status_unknown;
4389 } else {
4390 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4391 DP_DWN_STRM_PORT_TYPE_MASK;
4392 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4393 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4394 return connector_status_unknown;
4395 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004396
4397 /* Anything else is out of spec, warn and ignore */
4398 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004399 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004400}
4401
4402static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004403edp_detect(struct intel_dp *intel_dp)
4404{
4405 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4406 enum drm_connector_status status;
4407
4408 status = intel_panel_detect(dev);
4409 if (status == connector_status_unknown)
4410 status = connector_status_connected;
4411
4412 return status;
4413}
4414
Jani Nikulab93433c2015-08-20 10:47:36 +03004415static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4416 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004417{
Jani Nikulab93433c2015-08-20 10:47:36 +03004418 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004419
Jani Nikula0df53b72015-08-20 10:47:40 +03004420 switch (port->port) {
4421 case PORT_A:
4422 return true;
4423 case PORT_B:
4424 bit = SDE_PORTB_HOTPLUG;
4425 break;
4426 case PORT_C:
4427 bit = SDE_PORTC_HOTPLUG;
4428 break;
4429 case PORT_D:
4430 bit = SDE_PORTD_HOTPLUG;
4431 break;
4432 default:
4433 MISSING_CASE(port->port);
4434 return false;
4435 }
4436
4437 return I915_READ(SDEISR) & bit;
4438}
4439
4440static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4441 struct intel_digital_port *port)
4442{
4443 u32 bit;
4444
4445 switch (port->port) {
4446 case PORT_A:
4447 return true;
4448 case PORT_B:
4449 bit = SDE_PORTB_HOTPLUG_CPT;
4450 break;
4451 case PORT_C:
4452 bit = SDE_PORTC_HOTPLUG_CPT;
4453 break;
4454 case PORT_D:
4455 bit = SDE_PORTD_HOTPLUG_CPT;
4456 break;
Jani Nikulaa78695d2015-09-18 15:54:50 +03004457 case PORT_E:
4458 bit = SDE_PORTE_HOTPLUG_SPT;
4459 break;
Jani Nikula0df53b72015-08-20 10:47:40 +03004460 default:
4461 MISSING_CASE(port->port);
4462 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004463 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004464
Jani Nikulab93433c2015-08-20 10:47:36 +03004465 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004466}
4467
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004468static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004469 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004470{
Jani Nikula9642c812015-08-20 10:47:41 +03004471 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004472
Jani Nikula9642c812015-08-20 10:47:41 +03004473 switch (port->port) {
4474 case PORT_B:
4475 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4476 break;
4477 case PORT_C:
4478 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4479 break;
4480 case PORT_D:
4481 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4482 break;
4483 default:
4484 MISSING_CASE(port->port);
4485 return false;
4486 }
4487
4488 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4489}
4490
4491static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4492 struct intel_digital_port *port)
4493{
4494 u32 bit;
4495
4496 switch (port->port) {
4497 case PORT_B:
4498 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4499 break;
4500 case PORT_C:
4501 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4502 break;
4503 case PORT_D:
4504 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4505 break;
4506 default:
4507 MISSING_CASE(port->port);
4508 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004509 }
4510
Jani Nikula1d245982015-08-20 10:47:37 +03004511 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004512}
4513
Jani Nikulae464bfd2015-08-20 10:47:42 +03004514static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304515 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004516{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304517 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4518 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004519 u32 bit;
4520
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304521 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4522 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004523 case PORT_A:
4524 bit = BXT_DE_PORT_HP_DDIA;
4525 break;
4526 case PORT_B:
4527 bit = BXT_DE_PORT_HP_DDIB;
4528 break;
4529 case PORT_C:
4530 bit = BXT_DE_PORT_HP_DDIC;
4531 break;
4532 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304533 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004534 return false;
4535 }
4536
4537 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4538}
4539
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004540/*
4541 * intel_digital_port_connected - is the specified port connected?
4542 * @dev_priv: i915 private structure
4543 * @port: the port to test
4544 *
4545 * Return %true if @port is connected, %false otherwise.
4546 */
Sonika Jindal237ed862015-09-15 09:44:20 +05304547bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004548 struct intel_digital_port *port)
4549{
Jani Nikula0df53b72015-08-20 10:47:40 +03004550 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004551 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004552	else if (HAS_PCH_SPLIT(dev_priv))
4553 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004554 else if (IS_BROXTON(dev_priv))
4555 return bxt_digital_port_connected(dev_priv, port);
Wayne Boyer666a4532015-12-09 12:29:35 -08004556 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Jani Nikula9642c812015-08-20 10:47:41 +03004557 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004558 else
4559 return g4x_digital_port_connected(dev_priv, port);
4560}
4561
Keith Packard8c241fe2011-09-28 16:38:44 -07004562static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004563intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004564{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004565 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004566
Jani Nikula9cd300e2012-10-19 14:51:52 +03004567 /* use cached edid if we have one */
4568 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004569 /* invalid edid */
4570 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004571 return NULL;
4572
Jani Nikula55e9ede2013-10-01 10:38:54 +03004573 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004574 } else
4575 return drm_get_edid(&intel_connector->base,
4576 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004577}
4578
Chris Wilsonbeb60602014-09-02 20:04:00 +01004579static void
4580intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004581{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004582 struct intel_connector *intel_connector = intel_dp->attached_connector;
4583 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004584
Chris Wilsonbeb60602014-09-02 20:04:00 +01004585 edid = intel_dp_get_edid(intel_dp);
4586 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004587
Chris Wilsonbeb60602014-09-02 20:04:00 +01004588 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4589 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4590 else
4591 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4592}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004593
Chris Wilsonbeb60602014-09-02 20:04:00 +01004594static void
4595intel_dp_unset_edid(struct intel_dp *intel_dp)
4596{
4597 struct intel_connector *intel_connector = intel_dp->attached_connector;
4598
4599 kfree(intel_connector->detect_edid);
4600 intel_connector->detect_edid = NULL;
4601
4602 intel_dp->has_audio = false;
4603}
4604
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004605static enum drm_connector_status
4606intel_dp_detect(struct drm_connector *connector, bool force)
4607{
4608 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004609 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4610 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004611 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004612 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004613 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004614 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004615 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004616
Chris Wilson164c8592013-07-20 20:27:08 +01004617 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004618 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004619 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004620
Dave Airlie0e32b392014-05-02 14:02:48 +10004621 if (intel_dp->is_mst) {
4622 /* MST devices are disconnected from a monitor POV */
4623 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4624 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004625 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004626 }
4627
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004628 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4629 intel_display_power_get(to_i915(dev), power_domain);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004630
Chris Wilsond410b562014-09-02 20:03:59 +01004631 /* Can't disconnect eDP, but you can close the lid... */
4632 if (is_edp(intel_dp))
4633 status = edp_detect(intel_dp);
Ander Conselvan de Oliveirac555a812015-11-18 17:19:30 +02004634 else if (intel_digital_port_connected(to_i915(dev),
4635 dp_to_dig_port(intel_dp)))
4636 status = intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004637 else
Ander Conselvan de Oliveirac555a812015-11-18 17:19:30 +02004638 status = connector_status_disconnected;
4639
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304640 if (status != connector_status_connected) {
4641 intel_dp->compliance_test_active = 0;
4642 intel_dp->compliance_test_type = 0;
4643 intel_dp->compliance_test_data = 0;
4644
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004645 goto out;
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304646 }
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004647
Adam Jackson0d198322012-05-14 16:05:47 -04004648 intel_dp_probe_oui(intel_dp);
4649
Dave Airlie0e32b392014-05-02 14:02:48 +10004650 ret = intel_dp_probe_mst(intel_dp);
4651 if (ret) {
 4652		/* if we are in MST mode then this connector
 4653		 * won't appear connected or have anything with EDID on it */
4654 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4655 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4656 status = connector_status_disconnected;
4657 goto out;
4658 }
4659
	/*
	 * Clear the NACK and defer counts so that their exact values are
	 * available while reading the EDID; DP compliance tests 4.2.2.4
	 * and 4.2.2.5 require them.
	 */
4665 intel_dp->aux.i2c_nack_count = 0;
4666 intel_dp->aux.i2c_defer_count = 0;
4667
Chris Wilsonbeb60602014-09-02 20:04:00 +01004668 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004669
Paulo Zanonid63885d2012-10-26 19:05:49 -02004670 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4671 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004672 status = connector_status_connected;
4673
Todd Previte09b1eb12015-04-20 15:27:34 -07004674 /* Try to read the source of the interrupt */
4675 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4676 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4677 /* Clear interrupt source */
4678 drm_dp_dpcd_writeb(&intel_dp->aux,
4679 DP_DEVICE_SERVICE_IRQ_VECTOR,
4680 sink_irq_vector);
4681
4682 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4683 intel_dp_handle_test_request(intel_dp);
4684 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4685 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4686 }
4687
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004688out:
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004689 intel_display_power_put(to_i915(dev), power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004690 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004691}
4692
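/*
 * Forced probe: refresh the cached EDID under the AUX power domain, but
 * only if the connector is already known to be connected.
 */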
Chris Wilsonbeb60602014-09-02 20:04:00 +01004693static void
4694intel_dp_force(struct drm_connector *connector)
4695{
4696 struct intel_dp *intel_dp = intel_attached_dp(connector);
4697 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004698 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004699 enum intel_display_power_domain power_domain;
4700
4701 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4702 connector->base.id, connector->name);
4703 intel_dp_unset_edid(intel_dp);
4704
4705 if (connector->status != connector_status_connected)
4706 return;
4707
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004708 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4709 intel_display_power_get(dev_priv, power_domain);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004710
4711 intel_dp_set_edid(intel_dp);
4712
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004713 intel_display_power_put(dev_priv, power_domain);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004714
4715 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4716 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4717}
4718
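/*
 * Report modes from the cached EDID, falling back to the eDP fixed mode
 * when the panel provides no EDID.
 */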
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004719static int intel_dp_get_modes(struct drm_connector *connector)
4720{
Jani Nikuladd06f902012-10-19 14:51:50 +03004721 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004722 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004723
Chris Wilsonbeb60602014-09-02 20:04:00 +01004724 edid = intel_connector->detect_edid;
4725 if (edid) {
4726 int ret = intel_connector_update_modes(connector, edid);
4727 if (ret)
4728 return ret;
4729 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004730
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004731 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004732 if (is_edp(intel_attached_dp(connector)) &&
4733 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004734 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004735
4736 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004737 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004738 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004739 drm_mode_probed_add(connector, mode);
4740 return 1;
4741 }
4742 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004743
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004744 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004745}
4746
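/* Check the cached EDID for audio support. */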
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004747static bool
4748intel_dp_detect_audio(struct drm_connector *connector)
4749{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004750 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004751 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004752
Chris Wilsonbeb60602014-09-02 20:04:00 +01004753 edid = to_intel_connector(connector)->detect_edid;
4754 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004755 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004756
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004757 return has_audio;
4758}
4759
Chris Wilsonf6849602010-09-19 09:29:33 +01004760static int
4761intel_dp_set_property(struct drm_connector *connector,
4762 struct drm_property *property,
4763 uint64_t val)
4764{
Chris Wilsone953fd72011-02-21 22:23:52 +00004765 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004766 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004767 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4768 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004769 int ret;
4770
Rob Clark662595d2012-10-11 20:36:04 -05004771 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004772 if (ret)
4773 return ret;
4774
Chris Wilson3f43c482011-05-12 22:17:24 +01004775 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004776 int i = val;
4777 bool has_audio;
4778
4779 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004780 return 0;
4781
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004782 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004783
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004784 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004785 has_audio = intel_dp_detect_audio(connector);
4786 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004787 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004788
4789 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004790 return 0;
4791
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004792 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004793 goto done;
4794 }
4795
Chris Wilsone953fd72011-02-21 22:23:52 +00004796 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004797 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004798 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004799
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004800 switch (val) {
4801 case INTEL_BROADCAST_RGB_AUTO:
4802 intel_dp->color_range_auto = true;
4803 break;
4804 case INTEL_BROADCAST_RGB_FULL:
4805 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004806 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004807 break;
4808 case INTEL_BROADCAST_RGB_LIMITED:
4809 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004810 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004811 break;
4812 default:
4813 return -EINVAL;
4814 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004815
4816 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004817 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004818 return 0;
4819
Chris Wilsone953fd72011-02-21 22:23:52 +00004820 goto done;
4821 }
4822
Yuly Novikov53b41832012-10-26 12:04:00 +03004823 if (is_edp(intel_dp) &&
4824 property == connector->dev->mode_config.scaling_mode_property) {
4825 if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("DRM_MODE_SCALE_NONE not supported\n");
4827 return -EINVAL;
4828 }
4829
4830 if (intel_connector->panel.fitting_mode == val) {
4831 /* the eDP scaling property is not changed */
4832 return 0;
4833 }
4834 intel_connector->panel.fitting_mode = val;
4835
4836 goto done;
4837 }
4838
Chris Wilsonf6849602010-09-19 09:29:33 +01004839 return -EINVAL;
4840
4841done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004842 if (intel_encoder->base.crtc)
4843 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004844
4845 return 0;
4846}
4847
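/*
 * Free the cached EDIDs and, for eDP, the panel state before tearing
 * down the connector.
 */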
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004848static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004849intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004850{
Jani Nikula1d508702012-10-19 14:51:49 +03004851 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004852
Chris Wilson10e972d2014-09-04 21:43:45 +01004853 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004854
Jani Nikula9cd300e2012-10-19 14:51:52 +03004855 if (!IS_ERR_OR_NULL(intel_connector->edid))
4856 kfree(intel_connector->edid);
4857
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004858 /* Can't call is_edp() since the encoder may have been destroyed
4859 * already. */
4860 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004861 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004862
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004863 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004864 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004865}
4866
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004867void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004868{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004869 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4870 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004871
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02004872 intel_dp_aux_fini(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004873 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004874 if (is_edp(intel_dp)) {
4875 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004880 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004881 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004882 pps_unlock(intel_dp);
4883
Clint Taylor01527b32014-07-07 13:01:46 -07004884 if (intel_dp->edp_notifier.notifier_call) {
4885 unregister_reboot_notifier(&intel_dp->edp_notifier);
4886 intel_dp->edp_notifier.notifier_call = NULL;
4887 }
Keith Packardbd943152011-09-18 23:09:52 -07004888 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004889 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004890 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004891}
4892
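/* Make sure a delayed eDP VDD off has actually taken effect before suspend. */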
Imre Deak07f9cd02014-08-18 14:42:45 +03004893static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4894{
4895 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4896
4897 if (!is_edp(intel_dp))
4898 return;
4899
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004904 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004905 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004906 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004907 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004908}
4909
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004910static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4911{
4912 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4913 struct drm_device *dev = intel_dig_port->base.base.dev;
4914 struct drm_i915_private *dev_priv = dev->dev_private;
4915 enum intel_display_power_domain power_domain;
4916
4917 lockdep_assert_held(&dev_priv->pps_mutex);
4918
4919 if (!edp_have_panel_vdd(intel_dp))
4920 return;
4921
4922 /*
4923 * The VDD bit needs a power domain reference, so if the bit is
4924 * already enabled when we boot or resume, grab this reference and
4925 * schedule a vdd off, so we don't hold on to the reference
4926 * indefinitely.
4927 */
4928 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004929 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004930 intel_display_power_get(dev_priv, power_domain);
4931
4932 edp_panel_vdd_schedule_off(intel_dp);
4933}
4934
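/*
 * drm_encoder_funcs.reset for eDP: re-read the power sequencer assignment
 * (on vlv/chv) and sanitize any VDD state left enabled by the BIOS.
 */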
Imre Deak6d93c0c2014-07-31 14:03:36 +03004935static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4936{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004937 struct intel_dp *intel_dp;
4938
4939 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4940 return;
4941
4942 intel_dp = enc_to_intel_dp(encoder);
4943
4944 pps_lock(intel_dp);
4945
4946 /*
4947 * Read out the current power sequencer assignment,
4948 * in case the BIOS did something with it.
4949 */
Wayne Boyer666a4532015-12-09 12:29:35 -08004950 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004951 vlv_initial_power_sequencer_setup(intel_dp);
4952
4953 intel_edp_panel_vdd_sanitize(intel_dp);
4954
4955 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004956}
4957
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004958static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004959 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004960 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004961 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004962 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004963 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004964 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004965 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004966 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004967 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004968};
4969
4970static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4971 .get_modes = intel_dp_get_modes,
4972 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004973 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004974};
4975
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004976static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004977 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004978 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004979};
4980
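/*
 * Hotplug handler: a long pulse triggers a full sink re-probe (DPCD, OUI,
 * MST), a short pulse services sink IRQs and rechecks the link status.
 */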
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004981enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004982intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4983{
4984 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004985 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004986 struct drm_device *dev = intel_dig_port->base.base.dev;
4987 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004988 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004989 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004990
Takashi Iwai25400582015-11-19 12:09:56 +01004991 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4992 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
Dave Airlie0e32b392014-05-02 14:02:48 +10004993 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004994
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004995 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4996 /*
4997 * vdd off can generate a long pulse on eDP which
4998 * would require vdd on to handle it, and thus we
4999 * would end up in an endless cycle of
5000 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5001 */
5002 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5003 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02005004 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005005 }
5006
Ville Syrjälä26fbb772014-08-11 18:37:37 +03005007 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5008 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10005009 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10005010
Ville Syrjälä25f78f52015-11-16 15:01:04 +01005011 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak1c767b32014-08-18 14:42:42 +03005012 intel_display_power_get(dev_priv, power_domain);
5013
Dave Airlie0e32b392014-05-02 14:02:48 +10005014 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03005015 /* indicate that we need to restart link training */
5016 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10005017
Jani Nikula7e66bcf2015-08-20 10:47:39 +03005018 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5019 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10005020
		if (!intel_dp_get_dpcd(intel_dp))
			goto mst_fail;
5024
5025 intel_dp_probe_oui(intel_dp);
5026
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005027 if (!intel_dp_probe_mst(intel_dp)) {
5028 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5029 intel_dp_check_link_status(intel_dp);
5030 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005031 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005032 }
Dave Airlie0e32b392014-05-02 14:02:48 +10005033 } else {
5034 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03005035 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10005036 goto mst_fail;
5037 }
5038
5039 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10005040 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10005041 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10005042 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005043 }
5044 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005045
5046 ret = IRQ_HANDLED;
5047
Imre Deak1c767b32014-08-18 14:42:42 +03005048 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10005049mst_fail:
	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5051 if (intel_dp->is_mst) {
5052 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5053 intel_dp->is_mst = false;
5054 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5055 }
Imre Deak1c767b32014-08-18 14:42:42 +03005056put_power:
5057 intel_display_power_put(dev_priv, power_domain);
5058
5059 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10005060}
5061
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005062/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005063bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005064{
5065 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005066 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005067 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005068 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005069 [PORT_B] = DVO_PORT_DPB,
5070 [PORT_C] = DVO_PORT_DPC,
5071 [PORT_D] = DVO_PORT_DPD,
5072 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005073 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005074
	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
5079 if (INTEL_INFO(dev)->gen < 5)
5080 return false;
5081
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005082 if (port == PORT_A)
5083 return true;
5084
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005085 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005086 return false;
5087
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005088 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5089 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005090
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005091 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005092 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5093 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005094 return true;
5095 }
5096 return false;
5097}
5098
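/* Attach the force-audio, broadcast RGB and (for eDP) scaling mode properties. */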
Dave Airlie0e32b392014-05-02 14:02:48 +10005099void
Chris Wilsonf6849602010-09-19 09:29:33 +01005100intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5101{
Yuly Novikov53b41832012-10-26 12:04:00 +03005102 struct intel_connector *intel_connector = to_intel_connector(connector);
5103
Chris Wilson3f43c482011-05-12 22:17:24 +01005104 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005105 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005106 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005107
5108 if (is_edp(intel_dp)) {
5109 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005110 drm_object_attach_property(
5111 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005112 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005113 DRM_MODE_SCALE_ASPECT);
5114 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005115 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005116}
5117
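/* Start the panel power/backlight delay bookkeeping from the current time. */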
Imre Deakdada1a92014-01-29 13:25:41 +02005118static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5119{
5120 intel_dp->last_power_cycle = jiffies;
5121 intel_dp->last_power_on = jiffies;
5122 intel_dp->last_backlight_off = jiffies;
5123}
5124
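/*
 * Read the panel power sequencing delays from the hardware and the VBT
 * and combine them, falling back to the eDP spec limits when both are
 * unset.
 */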
Daniel Vetter67a54562012-10-20 20:57:45 +02005125static void
5126intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005127 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005128{
5129 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005130 struct edp_power_seq cur, vbt, spec,
5131 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305132 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005133 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
Jesse Barnes453c5422013-03-28 09:55:41 -07005134
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005135 lockdep_assert_held(&dev_priv->pps_mutex);
5136
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005137 /* already initialized? */
5138 if (final->t11_t12 != 0)
5139 return;
5140
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305141 if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcoded for now.
		 */
5147 pp_ctrl_reg = BXT_PP_CONTROL(0);
5148 pp_on_reg = BXT_PP_ON_DELAYS(0);
5149 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5150 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005151 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005152 pp_on_reg = PCH_PP_ON_DELAYS;
5153 pp_off_reg = PCH_PP_OFF_DELAYS;
5154 pp_div_reg = PCH_PP_DIVISOR;
5155 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005156 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5157
5158 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5159 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5160 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5161 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005162 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005163
5164 /* Workaround: Need to write PP_CONTROL with the unlock key as
5165 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305166 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005167
Jesse Barnes453c5422013-03-28 09:55:41 -07005168 pp_on = I915_READ(pp_on_reg);
5169 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305170 if (!IS_BROXTON(dev)) {
5171 I915_WRITE(pp_ctrl_reg, pp_ctl);
5172 pp_div = I915_READ(pp_div_reg);
5173 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005174
5175 /* Pull timing values out of registers */
5176 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5177 PANEL_POWER_UP_DELAY_SHIFT;
5178
5179 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5180 PANEL_LIGHT_ON_DELAY_SHIFT;
5181
5182 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5183 PANEL_LIGHT_OFF_DELAY_SHIFT;
5184
5185 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5186 PANEL_POWER_DOWN_DELAY_SHIFT;
5187
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305188 if (IS_BROXTON(dev)) {
5189 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5190 BXT_POWER_CYCLE_DELAY_SHIFT;
5191 if (tmp > 0)
5192 cur.t11_t12 = (tmp - 1) * 1000;
5193 else
5194 cur.t11_t12 = 0;
5195 } else {
5196 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005197 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305198 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005199
5200 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5201 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5202
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005203 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005204
5205 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5206 * our hw here, which are all in 100usec. */
5207 spec.t1_t3 = 210 * 10;
5208 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5209 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5210 spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but
	 * zero-based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it by 1000 to make it in units of 100usec,
	 * too. */
5215 spec.t11_t12 = (510 + 100) * 10;
5216
5217 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5218 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5219
5220 /* Use the max of the register settings and vbt. If both are
5221 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005222#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005223 spec.field : \
5224 max(cur.field, vbt.field))
5225 assign_final(t1_t3);
5226 assign_final(t8);
5227 assign_final(t9);
5228 assign_final(t10);
5229 assign_final(t11_t12);
5230#undef assign_final
5231
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005232#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005233 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5234 intel_dp->backlight_on_delay = get_delay(t8);
5235 intel_dp->backlight_off_delay = get_delay(t9);
5236 intel_dp->panel_power_down_delay = get_delay(t10);
5237 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5238#undef get_delay
5239
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005240 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5241 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5242 intel_dp->panel_power_cycle_delay);
5243
5244 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5245 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005246}
5247
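/* Write the resolved panel power sequencing delays back to the PPS registers. */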
5248static void
5249intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005250 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005251{
5252 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005253 u32 pp_on, pp_off, pp_div, port_sel = 0;
5254 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005255 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005256 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005257 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005258
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005259 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005260
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305261 if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcoded for now.
		 */
5267 pp_ctrl_reg = BXT_PP_CONTROL(0);
5268 pp_on_reg = BXT_PP_ON_DELAYS(0);
5269 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5270
5271 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005272 pp_on_reg = PCH_PP_ON_DELAYS;
5273 pp_off_reg = PCH_PP_OFF_DELAYS;
5274 pp_div_reg = PCH_PP_DIVISOR;
5275 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005276 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5277
5278 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5279 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5280 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005281 }
5282
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005283 /*
5284 * And finally store the new values in the power sequencer. The
5285 * backlight delays are set to 1 because we do manual waits on them. For
5286 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5287 * we'll end up waiting for the backlight off delay twice: once when we
5288 * do the manual sleep, and once when we disable the panel and wait for
5289 * the PP_STATUS bit to become zero.
5290 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005291 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005292 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5293 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005294 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005295 /* Compute the divisor for the pp clock, simply match the Bspec
5296 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305297 if (IS_BROXTON(dev)) {
5298 pp_div = I915_READ(pp_ctrl_reg);
5299 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5300 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5301 << BXT_POWER_CYCLE_DELAY_SHIFT);
5302 } else {
5303 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5304 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5305 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5306 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005307
5308 /* Haswell doesn't have any port selection bits for the panel
5309 * power sequencer any more. */
Wayne Boyer666a4532015-12-09 12:29:35 -08005310 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005311 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005312 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005313 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005314 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005315 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005316 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005317 }
5318
Jesse Barnes453c5422013-03-28 09:55:41 -07005319 pp_on |= port_sel;
5320
5321 I915_WRITE(pp_on_reg, pp_on);
5322 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305323 if (IS_BROXTON(dev))
5324 I915_WRITE(pp_ctrl_reg, pp_div);
5325 else
5326 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005327
Daniel Vetter67a54562012-10-20 20:57:45 +02005328 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005329 I915_READ(pp_on_reg),
5330 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305331 IS_BROXTON(dev) ?
5332 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005333 I915_READ(pp_div_reg));
Zhenyu Wange3421a12010-04-08 09:43:27 +08005334}
5335
Vandana Kannanb33a2812015-02-13 15:33:03 +05305336/**
5337 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5338 * @dev: DRM device
5339 * @refresh_rate: RR to be programmed
5340 *
5341 * This function gets called when refresh rate (RR) has to be changed from
5342 * one frequency to another. Switches can be between high and low RR
5343 * supported by the panel or to any other RR based on media playback (in
5344 * this case, RR value needs to be passed from user space).
5345 *
5346 * The caller of this function needs to take a lock on dev_priv->drrs.
5347 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305348static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305349{
5350 struct drm_i915_private *dev_priv = dev->dev_private;
5351 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305352 struct intel_digital_port *dig_port = NULL;
5353 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005354 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305355 struct intel_crtc *intel_crtc = NULL;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305356 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305357
5358 if (refresh_rate <= 0) {
5359 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5360 return;
5361 }
5362
Vandana Kannan96178ee2015-01-10 02:25:56 +05305363 if (intel_dp == NULL) {
5364 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305365 return;
5366 }
5367
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005368 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005369 * FIXME: This needs proper synchronization with psr state for some
5370 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005371 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305372
Vandana Kannan96178ee2015-01-10 02:25:56 +05305373 dig_port = dp_to_dig_port(intel_dp);
5374 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005375 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305376
5377 if (!intel_crtc) {
5378 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5379 return;
5380 }
5381
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005382 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305383
Vandana Kannan96178ee2015-01-10 02:25:56 +05305384 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305385 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5386 return;
5387 }
5388
Vandana Kannan96178ee2015-01-10 02:25:56 +05305389 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5390 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305391 index = DRRS_LOW_RR;
5392
Vandana Kannan96178ee2015-01-10 02:25:56 +05305393 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305394 DRM_DEBUG_KMS(
5395 "DRRS requested for previously set RR...ignoring\n");
5396 return;
5397 }
5398
5399 if (!intel_crtc->active) {
5400 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5401 return;
5402 }
5403
Durgadoss R44395bf2015-02-13 15:33:02 +05305404 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305405 switch (index) {
5406 case DRRS_HIGH_RR:
5407 intel_dp_set_m_n(intel_crtc, M1_N1);
5408 break;
5409 case DRRS_LOW_RR:
5410 intel_dp_set_m_n(intel_crtc, M2_N2);
5411 break;
5412 case DRRS_MAX_RR:
5413 default:
5414 DRM_ERROR("Unsupported refreshrate type\n");
5415 }
5416 } else if (INTEL_INFO(dev)->gen > 6) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005417 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Ville Syrjälä649636e2015-09-22 19:50:01 +03005418 u32 val;
Vandana Kannana4c30b12015-02-13 15:33:00 +05305419
Ville Syrjälä649636e2015-09-22 19:50:01 +03005420 val = I915_READ(reg);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305421 if (index > DRRS_HIGH_RR) {
Wayne Boyer666a4532015-12-09 12:29:35 -08005422 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305423 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5424 else
5425 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305426 } else {
Wayne Boyer666a4532015-12-09 12:29:35 -08005427 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305428 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5429 else
5430 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305431 }
5432 I915_WRITE(reg, val);
5433 }
5434
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305435 dev_priv->drrs.refresh_rate_type = index;
5436
5437 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5438}
5439
Vandana Kannanb33a2812015-02-13 15:33:03 +05305440/**
5441 * intel_edp_drrs_enable - init drrs struct if supported
5442 * @intel_dp: DP struct
5443 *
5444 * Initializes frontbuffer_bits and drrs.dp
5445 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305446void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5447{
5448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5449 struct drm_i915_private *dev_priv = dev->dev_private;
5450 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5451 struct drm_crtc *crtc = dig_port->base.base.crtc;
5452 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5453
5454 if (!intel_crtc->config->has_drrs) {
5455 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5456 return;
5457 }
5458
5459 mutex_lock(&dev_priv->drrs.mutex);
5460 if (WARN_ON(dev_priv->drrs.dp)) {
5461 DRM_ERROR("DRRS already enabled\n");
5462 goto unlock;
5463 }
5464
5465 dev_priv->drrs.busy_frontbuffer_bits = 0;
5466
5467 dev_priv->drrs.dp = intel_dp;
5468
5469unlock:
5470 mutex_unlock(&dev_priv->drrs.mutex);
5471}
5472
Vandana Kannanb33a2812015-02-13 15:33:03 +05305473/**
5474 * intel_edp_drrs_disable - Disable DRRS
5475 * @intel_dp: DP struct
5476 *
5477 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305478void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5479{
5480 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5481 struct drm_i915_private *dev_priv = dev->dev_private;
5482 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5483 struct drm_crtc *crtc = dig_port->base.base.crtc;
5484 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5485
5486 if (!intel_crtc->config->has_drrs)
5487 return;
5488
5489 mutex_lock(&dev_priv->drrs.mutex);
5490 if (!dev_priv->drrs.dp) {
5491 mutex_unlock(&dev_priv->drrs.mutex);
5492 return;
5493 }
5494
5495 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5496 intel_dp_set_drrs_state(dev_priv->dev,
5497 intel_dp->attached_connector->panel.
5498 fixed_mode->vrefresh);
5499
5500 dev_priv->drrs.dp = NULL;
5501 mutex_unlock(&dev_priv->drrs.mutex);
5502
5503 cancel_delayed_work_sync(&dev_priv->drrs.work);
5504}
5505
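/*
 * Delayed work: switch to the low refresh rate once the frontbuffer has
 * been idle long enough.
 */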
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305506static void intel_edp_drrs_downclock_work(struct work_struct *work)
5507{
5508 struct drm_i915_private *dev_priv =
5509 container_of(work, typeof(*dev_priv), drrs.work.work);
5510 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305511
Vandana Kannan96178ee2015-01-10 02:25:56 +05305512 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305513
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305514 intel_dp = dev_priv->drrs.dp;
5515
5516 if (!intel_dp)
5517 goto unlock;
5518
	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */
5523
5524 if (dev_priv->drrs.busy_frontbuffer_bits)
5525 goto unlock;
5526
5527 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5528 intel_dp_set_drrs_state(dev_priv->dev,
5529 intel_dp->attached_connector->panel.
5530 downclock_mode->vrefresh);
5531
5532unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305533 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305534}
5535
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305546void intel_edp_drrs_invalidate(struct drm_device *dev,
5547 unsigned frontbuffer_bits)
5548{
5549 struct drm_i915_private *dev_priv = dev->dev_private;
5550 struct drm_crtc *crtc;
5551 enum pipe pipe;
5552
Daniel Vetter9da7d692015-04-09 16:44:15 +02005553 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305554 return;
5555
Daniel Vetter88f933a2015-04-09 16:44:16 +02005556 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305557
Vandana Kannana93fad02015-01-10 02:25:59 +05305558 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005559 if (!dev_priv->drrs.dp) {
5560 mutex_unlock(&dev_priv->drrs.mutex);
5561 return;
5562 }
5563
Vandana Kannana93fad02015-01-10 02:25:59 +05305564 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5565 pipe = to_intel_crtc(crtc)->pipe;
5566
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005567 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5568 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5569
Ramalingam C0ddfd202015-06-15 20:50:05 +05305570 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005571 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305572 intel_dp_set_drrs_state(dev_priv->dev,
5573 dev_priv->drrs.dp->attached_connector->panel.
5574 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305575
Vandana Kannana93fad02015-01-10 02:25:59 +05305576 mutex_unlock(&dev_priv->drrs.mutex);
5577}
5578
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305591void intel_edp_drrs_flush(struct drm_device *dev,
5592 unsigned frontbuffer_bits)
5593{
5594 struct drm_i915_private *dev_priv = dev->dev_private;
5595 struct drm_crtc *crtc;
5596 enum pipe pipe;
5597
Daniel Vetter9da7d692015-04-09 16:44:15 +02005598 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305599 return;
5600
Daniel Vetter88f933a2015-04-09 16:44:16 +02005601 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305602
Vandana Kannana93fad02015-01-10 02:25:59 +05305603 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005604 if (!dev_priv->drrs.dp) {
5605 mutex_unlock(&dev_priv->drrs.mutex);
5606 return;
5607 }
5608
Vandana Kannana93fad02015-01-10 02:25:59 +05305609 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5610 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005611
5612 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305613 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5614
Ramalingam C0ddfd202015-06-15 20:50:05 +05305615 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005616 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305617 intel_dp_set_drrs_state(dev_priv->dev,
5618 dev_priv->drrs.dp->attached_connector->panel.
5619 fixed_mode->vrefresh);
5620
5621 /*
5622 * flush also means no more activity hence schedule downclock, if all
5623 * other fbs are quiescent too
5624 */
5625 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305626 schedule_delayed_work(&dev_priv->drrs.work,
5627 msecs_to_jiffies(1000));
5628 mutex_unlock(&dev_priv->drrs.mutex);
5629}
5630
Vandana Kannanb33a2812015-02-13 15:33:03 +05305631/**
5632 * DOC: Display Refresh Rate Switching (DRRS)
5633 *
5634 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5636 * dynamically, based on the usage scenario. This feature is applicable
5637 * for internal panels.
5638 *
5639 * Indication that the panel supports DRRS is given by the panel EDID, which
5640 * would list multiple refresh rates for one resolution.
5641 *
5642 * DRRS is of 2 types - static and seamless.
5643 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5644 * (may appear as a blink on screen) and is used in dock-undock scenario.
5645 * Seamless DRRS involves changing RR without any visual effect to the user
5646 * and can be used during normal system usage. This is done by programming
5647 * certain registers.
5648 *
5649 * Support for static/seamless DRRS may be indicated in the VBT based on
5650 * inputs from the panel spec.
5651 *
5652 * DRRS saves power by switching to low RR based on usage scenarios.
5653 *
5654 * eDP DRRS:-
5655 * The implementation is based on frontbuffer tracking implementation.
5656 * When there is a disturbance on the screen triggered by user activity or a
5657 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5658 * When there is no movement on screen, after a timeout of 1 second, a switch
5659 * to low RR is made.
5660 * For integration with frontbuffer tracking code,
5661 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5662 *
5663 * DRRS can be further extended to support other internal panels and also
5664 * the scenario of video playback wherein RR is set based on the rate
5665 * requested by userspace.
5666 */
5667
5668/**
5669 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5670 * @intel_connector: eDP connector
5671 * @fixed_mode: preferred mode of panel
5672 *
5673 * This function is called only once at driver load to initialize basic
5674 * DRRS stuff.
5675 *
5676 * Returns:
5677 * Downclock mode if panel supports it, else return NULL.
5678 * DRRS support is determined by the presence of downclock mode (apart
5679 * from VBT setting).
5680 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305681static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305682intel_dp_drrs_init(struct intel_connector *intel_connector,
5683 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305684{
5685 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305686 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305687 struct drm_i915_private *dev_priv = dev->dev_private;
5688 struct drm_display_mode *downclock_mode = NULL;
5689
Daniel Vetter9da7d692015-04-09 16:44:15 +02005690 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5691 mutex_init(&dev_priv->drrs.mutex);
5692
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305693 if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5695 return NULL;
5696 }
5697
5698 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005699 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305700 return NULL;
5701 }
5702
5703 downclock_mode = intel_find_panel_downclock
5704 (dev, fixed_mode, connector);
5705
5706 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305707 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305708 return NULL;
5709 }
5710
Vandana Kannan96178ee2015-01-10 02:25:56 +05305711 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305712
Vandana Kannan96178ee2015-01-10 02:25:56 +05305713 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005714 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305715 return downclock_mode;
5716}
5717
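/*
 * eDP-specific connector setup: verify the panel via DPCD, initialize the
 * power sequencer registers, cache the EDID/fixed mode and set up the
 * backlight.
 */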
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005718static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005719 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005720{
5721 struct drm_connector *connector = &intel_connector->base;
5722 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005723 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5724 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005725 struct drm_i915_private *dev_priv = dev->dev_private;
5726 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305727 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005728 bool has_dpcd;
5729 struct drm_display_mode *scan;
5730 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005731 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005732
5733 if (!is_edp(intel_dp))
5734 return true;
5735
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005736 pps_lock(intel_dp);
5737 intel_edp_panel_vdd_sanitize(intel_dp);
5738 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005739
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005740 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005741 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005742
5743 if (has_dpcd) {
5744 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5745 dev_priv->no_aux_handshake =
5746 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5747 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5748 } else {
5749 /* if this fails, presume the device is a ghost */
5750 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005751 return false;
5752 }
5753
5754 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005755 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005756 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005757 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005758
Daniel Vetter060c8772014-03-21 23:22:35 +01005759 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005760 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005761 if (edid) {
5762 if (drm_add_edid_modes(connector, edid)) {
5763 drm_mode_connector_update_edid_property(connector,
5764 edid);
5765 drm_edid_to_eld(connector, edid);
5766 } else {
5767 kfree(edid);
5768 edid = ERR_PTR(-EINVAL);
5769 }
5770 } else {
5771 edid = ERR_PTR(-ENOENT);
5772 }
5773 intel_connector->edid = edid;
5774
5775 /* prefer fixed mode from EDID if available */
5776 list_for_each_entry(scan, &connector->probed_modes, head) {
5777 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5778 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305779 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305780 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005781 break;
5782 }
5783 }
5784
5785 /* fallback to VBT if available for eDP */
5786 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5787 fixed_mode = drm_mode_duplicate(dev,
5788 dev_priv->vbt.lfp_lvds_vbt_mode);
5789 if (fixed_mode)
5790 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5791 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005792 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005793
Wayne Boyer666a4532015-12-09 12:29:35 -08005794 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Clint Taylor01527b32014-07-07 13:01:46 -07005795 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5796 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005797
5798 /*
5799 * Figure out the current pipe for the initial backlight setup.
5800 * If the current pipe isn't valid, try the PPS pipe, and if that
5801 * fails just assume pipe A.
5802 */
5803 if (IS_CHERRYVIEW(dev))
5804 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5805 else
5806 pipe = PORT_TO_PIPE(intel_dp->DP);
5807
5808 if (pipe != PIPE_A && pipe != PIPE_B)
5809 pipe = intel_dp->pps_pipe;
5810
5811 if (pipe != PIPE_A && pipe != PIPE_B)
5812 pipe = PIPE_A;
5813
5814 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5815 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005816 }
5817
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}

bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

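	/* Create the connector and hook it up to the DP connector helpers. */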
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

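	/* Attach the connector to its encoder and register it with userspace. */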
	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

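	/*
	 * For eDP, initialize the power sequencer timestamps and the
	 * per-platform PPS state under the PPS lock.
	 */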
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

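	/* Set up the AUX channel used for DPCD and EDID transfers. */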
	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

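	/*
	 * Probe the eDP panel; on failure, undo the AUX and MST setup and
	 * bail out.
	 */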
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
	 * written as 0xd. Failing to do so results in spurious interrupts
	 * being generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}

void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

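	/*
	 * Allocate the digital port (which embeds the encoder) and its
	 * connector; both are freed again if anything below fails.
	 */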
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
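	/*
	 * The enable/disable hooks are platform specific: CHV and VLV have
	 * their own PHY sequences, everything else goes through the g4x/ilk
	 * paths.
	 */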
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	dev_priv->dig_port_map[port] = intel_encoder;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
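	/*
	 * On CHV, port D can only drive pipe C and ports B/C only pipes A/B;
	 * other platforms allow any of the three pipes.
	 */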
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

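	/* Have the hotplug code hand HPD pulses on this port to intel_dp_hpd_pulse(). */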
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}

void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
		}
	}
}