/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

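/*
 * Worked example of the chv_dpll[] fixed-point encoding above: for the
 * 162000 entry, m2 = 0x819999a, so
 * m2_int      = 0x819999a >> 22              = 32
 * m2_fraction = 0x819999a & ((1 << 22) - 1)  = 1677722
 * which matches the per-entry annotations.
 */
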
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

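/*
 * Example for intel_dp_unused_lane_mask(): with lane_count = 2,
 * (1 << 2) - 1 = 0x3 marks lanes 0-1 as used, so the returned mask is
 * ~0x3 & 0xf = 0xc, i.e. lanes 2 and 3 are unused.
 */
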
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

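/*
 * The DP_LINK_BW_* codes checked above come straight from the DPCD:
 * 0x06, 0x0a and 0x14 encode 1.62, 2.7 and 5.4 Gbps per lane respectively
 * (code * 27000 gives the link rate in kHz, matching default_rates[] above).
 */
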
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

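/*
 * Continuing the example from the comment above: 1680x1050R at 18bpp needs
 * intel_dp_link_required(119000, 18) = 214200 decakilobits, which fits in a
 * single 2.7GHz lane (216000) but not in a single 1.62GHz lane
 * (162000 * 1 * 8 / 10 = 129600).
 */
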
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

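/*
 * Example walk-through of intel_dp_mode_valid(): a 1920x1080 mode with a
 * 148500 kHz clock needs intel_dp_link_required(148500, 18) = 267300
 * decakilobits, while two lanes at 270000 provide
 * intel_dp_max_data_rate(270000, 2) = 432000, so the clock check passes;
 * the mode is then only rejected if it exceeds an eDP panel's fixed mode,
 * falls below 10000 kHz, or uses double-clocked timings.
 */
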
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

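/*
 * intel_dp_pack_aux() packs at most four bytes MSB-first: src = { 0x11,
 * 0x22, 0x33 } becomes 0x11223300.  intel_dp_unpack_aux() below performs
 * the inverse, writing a register value back out byte by byte.
 */
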
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power so that the T12 timing
   is guaranteed.  Only applicable when the panel PM state is not tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and we would like the
	 * AUX clock to run at 2MHz.  So, take the hrawclk value, divide it
	 * by 2 and use that.
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

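/*
 * The dividers above all target the ~2MHz AUX clock mentioned in
 * i9xx_get_aux_clock_divider(): e.g. with a 450000 kHz CDCLK,
 * DIV_ROUND_CLOSEST(450000, 2000) = 225, i.e. 450 MHz / 225 = 2 MHz.
 */
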
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required after errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There, EBUSY should
		 * also take care of a 1ms wait before retrying.
		 * That aux retry re-org is still needed, and once it is
		 * merged we can remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
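/*
 * The AUX request header built in intel_dp_aux_transfer() below is four
 * bytes: txbuf[0] carries the request type in the high nibble and address
 * bits 19:16 in the low nibble, txbuf[1] and txbuf[2] carry address bits
 * 15:8 and 7:0, and txbuf[3] holds the payload length minus one.
 */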
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}

static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

Daniel Vetterc6bb3532013-04-19 11:14:33 +02001243static void
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001244skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
Damien Lespiau5416d872014-11-14 17:24:33 +00001245{
1246 u32 ctrl1;
1247
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03001248 memset(&pipe_config->dpll_hw_state, 0,
1249 sizeof(pipe_config->dpll_hw_state));
1250
Damien Lespiau5416d872014-11-14 17:24:33 +00001251 pipe_config->ddi_pll_sel = SKL_DPLL0;
1252 pipe_config->dpll_hw_state.cfgcr1 = 0;
1253 pipe_config->dpll_hw_state.cfgcr2 = 0;
1254
1255 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001256 switch (pipe_config->port_clock / 2) {
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301257 case 81000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001258 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
Damien Lespiau5416d872014-11-14 17:24:33 +00001259 SKL_DPLL0);
1260 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301261 case 135000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001262 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
Damien Lespiau5416d872014-11-14 17:24:33 +00001263 SKL_DPLL0);
1264 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301265 case 270000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001266 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
Damien Lespiau5416d872014-11-14 17:24:33 +00001267 SKL_DPLL0);
1268 break;
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301269 case 162000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001270 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301271 SKL_DPLL0);
1272 break;
1273 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1274 results in CDCLK change. Need to handle the change of CDCLK by
1275 disabling pipes and re-enabling them */
1276 case 108000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001277 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301278 SKL_DPLL0);
1279 break;
1280 case 216000:
Damien Lespiau71cd8422015-04-30 16:39:17 +01001281 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
Sonika Jindalc3346ef2015-02-21 11:12:13 +05301282 SKL_DPLL0);
1283 break;
1284
Damien Lespiau5416d872014-11-14 17:24:33 +00001285 }
1286 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1287}
1288
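/*
 * HSW/BDW DP ports are clocked from the fixed LCPLL; pick the clock
 * selection that matches the requested link rate.
 */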
Ander Conselvan de Oliveira6fa2d192015-08-31 11:23:28 +03001289void
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001290hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
Daniel Vetter0e503382014-07-04 11:26:04 -03001291{
Ander Conselvan de Oliveiraee46f3c72015-06-30 16:10:38 +03001292 memset(&pipe_config->dpll_hw_state, 0,
1293 sizeof(pipe_config->dpll_hw_state));
1294
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001295 switch (pipe_config->port_clock / 2) {
1296 case 81000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001297 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1298 break;
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001299 case 135000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001300 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1301 break;
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001302 case 270000:
Daniel Vetter0e503382014-07-04 11:26:04 -03001303 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1304 break;
1305 }
1306}
1307
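/*
 * Link rates supported by the sink: either the explicit rate table read
 * from the DPCD, or the standard rates up to the sink's max link bw.
 */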
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301308static int
Ville Syrjälä12f6a2e2015-03-12 17:10:30 +02001309intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301310{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001311 if (intel_dp->num_sink_rates) {
1312 *sink_rates = intel_dp->sink_rates;
1313 return intel_dp->num_sink_rates;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301314 }
Ville Syrjälä12f6a2e2015-03-12 17:10:30 +02001315
1316 *sink_rates = default_rates;
1317
1318 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05301319}
1320
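/*
 * HBR2 (5.4 GHz) is only supported on HSW (except ULX), BDW and gen9+,
 * and must stay disabled on early SKL steppings (WaDisableHBR2).
 */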
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001321bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301322{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001323 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1324 struct drm_device *dev = dig_port->base.base.dev;
1325
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301326 /* WaDisableHBR2:skl */
Jani Nikulae87a0052015-10-20 15:22:02 +03001327 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301328 return false;
1329
1330 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1331 (INTEL_INFO(dev)->gen >= 9))
1332 return true;
1333 else
1334 return false;
1335}
1336
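/*
 * Link rates supported by the source: the platform specific table (BXT,
 * SKL/KBL) or the standard rates, dropping the trailing HBR2 entry when
 * the source can't drive 5.4 GHz.
 */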
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301337static int
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001338intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301339{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001340 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1341 struct drm_device *dev = dig_port->base.base.dev;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301342 int size;
1343
Sonika Jindal64987fc2015-05-26 17:50:13 +05301344 if (IS_BROXTON(dev)) {
1345 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301346 size = ARRAY_SIZE(bxt_rates);
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001347 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301348 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301349 size = ARRAY_SIZE(skl_rates);
1350 } else {
1351 *source_rates = default_rates;
1352 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301353 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001354
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301355	/* This depends on the fact that 5.4 GHz is the last value in the array */
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001356 if (!intel_dp_source_supports_hbr2(intel_dp))
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301357 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001358
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301359 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301360}
1361
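/*
 * Pre-DDI platforms use fixed DPLL dividers per DP link rate; look up
 * the entry for the selected port clock and latch it into the crtc state.
 */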
Daniel Vetter0e503382014-07-04 11:26:04 -03001362static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001363intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001364 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001365{
1366 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001367 const struct dp_link_dpll *divisor = NULL;
1368 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001369
1370 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001371 divisor = gen4_dpll;
1372 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001373 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001374 divisor = pch_dpll;
1375 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001376 } else if (IS_CHERRYVIEW(dev)) {
1377 divisor = chv_dpll;
1378 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001379 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001380 divisor = vlv_dpll;
1381 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001382 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001383
1384 if (divisor && count) {
1385 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001386 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001387 pipe_config->dpll = divisor[i].dpll;
1388 pipe_config->clock_set = true;
1389 break;
1390 }
1391 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001392 }
1393}
1394
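/*
 * Merge-style intersection of two ascending rate arrays, e.g.
 * {162000, 270000, 540000} and {162000, 270000} give {162000, 270000}.
 * Returns the number of entries written to common_rates[].
 */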
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001395static int intersect_rates(const int *source_rates, int source_len,
1396 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001397 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301398{
1399 int i = 0, j = 0, k = 0;
1400
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301401 while (i < source_len && j < sink_len) {
1402 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001403 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1404 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001405 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301406 ++k;
1407 ++i;
1408 ++j;
1409 } else if (source_rates[i] < sink_rates[j]) {
1410 ++i;
1411 } else {
1412 ++j;
1413 }
1414 }
1415 return k;
1416}
1417
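/* Link rates supported by both the source and the sink. */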
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001418static int intel_dp_common_rates(struct intel_dp *intel_dp,
1419 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001420{
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001421 const int *source_rates, *sink_rates;
1422 int source_len, sink_len;
1423
1424 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001425 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001426
1427 return intersect_rates(source_rates, source_len,
1428 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001429 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001430}
1431
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001432static void snprintf_int_array(char *str, size_t len,
1433 const int *array, int nelem)
1434{
1435 int i;
1436
1437 str[0] = '\0';
1438
1439 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001440 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001441 if (r >= len)
1442 return;
1443 str += r;
1444 len -= r;
1445 }
1446}
1447
1448static void intel_dp_print_rates(struct intel_dp *intel_dp)
1449{
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001450 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001451 int source_len, sink_len, common_len;
1452 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001453 char str[128]; /* FIXME: too big for stack? */
1454
1455 if ((drm_debug & DRM_UT_KMS) == 0)
1456 return;
1457
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001458 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001459 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1460 DRM_DEBUG_KMS("source rates: %s\n", str);
1461
1462 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1463 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1464 DRM_DEBUG_KMS("sink rates: %s\n", str);
1465
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001466 common_len = intel_dp_common_rates(intel_dp, common_rates);
1467 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1468 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001469}
1470
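/*
 * Index of @find in @rates; rate_to_index(0, rates) on a zero-initialized
 * array doubles as a count of the valid entries.
 */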
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001471static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301472{
1473 int i = 0;
1474
1475 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1476 if (find == rates[i])
1477 break;
1478
1479 return i;
1480}
1481
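/*
 * Highest link rate supported by both ends, falling back to 162000 if
 * the common set is unexpectedly empty.
 */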
Ville Syrjälä50fec212015-03-12 17:10:34 +02001482int
1483intel_dp_max_link_rate(struct intel_dp *intel_dp)
1484{
1485 int rates[DP_MAX_SUPPORTED_RATES] = {};
1486 int len;
1487
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001488 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001489 if (WARN_ON(len <= 0))
1490 return 162000;
1491
1492 return rates[rate_to_index(0, rates) - 1];
1493}
1494
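/*
 * Translate a link rate into the sink's rate table index, used as the
 * rate select value for sinks that provide such a table.
 */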
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001495int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1496{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001497 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001498}
1499
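/*
 * Work out how the link rate is programmed on the sink: rate-table sinks
 * take a rate select index with link_bw 0, everything else takes the
 * classic link bw code.
 */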
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03001500void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1501 uint8_t *link_bw, uint8_t *rate_select)
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001502{
1503 if (intel_dp->num_sink_rates) {
1504 *link_bw = 0;
1505 *rate_select =
1506 intel_dp_rate_select(intel_dp, port_clock);
1507 } else {
1508 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1509 *rate_select = 0;
1510 }
1511}
1512
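/*
 * Pick the link configuration for a mode: step the pipe bpp down until
 * some common link rate / lane count combination (lowest tried first)
 * provides enough bandwidth for it.
 */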
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001513bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001514intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001515 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001516{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001517 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001518 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001519 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001520 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001521 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001522 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001523 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001524 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001525 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001526 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001527	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001528 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301529 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001530 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001531 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001532 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1533 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001534 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301535
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001536 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301537
1538 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001539 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301540
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001541 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001542
Imre Deakbc7d38a2013-05-16 14:40:36 +03001543 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001544 pipe_config->has_pch_encoder = true;
1545
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001546 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001547 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001548 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001549
Jani Nikuladd06f902012-10-19 14:51:50 +03001550 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1551 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1552 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001553
1554 if (INTEL_INFO(dev)->gen >= 9) {
1555 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001556 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001557 if (ret)
1558 return ret;
1559 }
1560
Matt Roperb56676272015-11-04 09:05:27 -08001561 if (HAS_GMCH_DISPLAY(dev))
Jesse Barnes2dd24552013-04-25 12:55:01 -07001562 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1563 intel_connector->panel.fitting_mode);
1564 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001565 intel_pch_panel_fitting(intel_crtc, pipe_config,
1566 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001567 }
1568
Daniel Vettercb1793c2012-06-04 18:39:21 +02001569 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001570 return false;
1571
Daniel Vetter083f9562012-04-20 20:23:49 +02001572 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301573 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001574 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001575 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001576
Daniel Vetter36008362013-03-27 00:44:59 +01001577 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1578 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001579 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001580 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301581
 1582		/* Get the bpp from the VBT only for panels that don't have bpp in the EDID */
1583 if (intel_connector->base.display_info.bpc == 0 &&
1584 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001585 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1586 dev_priv->vbt.edp_bpp);
1587 bpp = dev_priv->vbt.edp_bpp;
1588 }
1589
Jani Nikula344c5bb2014-09-09 11:25:13 +03001590 /*
1591 * Use the maximum clock and number of lanes the eDP panel
 1592		 * advertises being capable of. The panels are generally
1593 * designed to support only a single clock and lane
1594 * configuration, and typically these values correspond to the
1595 * native resolution of the panel.
1596 */
1597 min_lane_count = max_lane_count;
1598 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001599 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001600
Daniel Vetter36008362013-03-27 00:44:59 +01001601 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001602 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1603 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001604
Dave Airliec6930992014-07-14 11:04:39 +10001605 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301606 for (lane_count = min_lane_count;
1607 lane_count <= max_lane_count;
1608 lane_count <<= 1) {
1609
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001610 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001611 link_avail = intel_dp_max_data_rate(link_clock,
1612 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001613
Daniel Vetter36008362013-03-27 00:44:59 +01001614 if (mode_rate <= link_avail) {
1615 goto found;
1616 }
1617 }
1618 }
1619 }
1620
1621 return false;
1622
1623found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001624 if (intel_dp->color_range_auto) {
1625 /*
1626 * See:
1627 * CEA-861-E - 5.1 Default Encoding Parameters
1628 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1629 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001630 pipe_config->limited_color_range =
1631 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1632 } else {
1633 pipe_config->limited_color_range =
1634 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001635 }
1636
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001637 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301638
Daniel Vetter657445f2013-05-04 10:09:18 +02001639 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001640 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001641
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001642 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1643 &link_bw, &rate_select);
1644
1645 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1646 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001647 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001648 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1649 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001650
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001651 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001652 adjusted_mode->crtc_clock,
1653 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001654 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001655
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301656 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301657 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001658 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301659 intel_link_compute_m_n(bpp, lane_count,
1660 intel_connector->panel.downclock_mode->clock,
1661 pipe_config->port_clock,
1662 &pipe_config->dp_m2_n2);
1663 }
1664
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001665 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001666 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301667 else if (IS_BROXTON(dev))
1668 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001669 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001670 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001671 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001672 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001673
Daniel Vetter36008362013-03-27 00:44:59 +01001674 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001675}
1676
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001677void intel_dp_set_link_params(struct intel_dp *intel_dp,
1678 const struct intel_crtc_state *pipe_config)
1679{
1680 intel_dp->link_rate = pipe_config->port_clock;
1681 intel_dp->lane_count = pipe_config->lane_count;
1682}
1683
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001684static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001685{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001686 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001687 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001688 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001689 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001690 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03001691 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001692
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001693 intel_dp_set_link_params(intel_dp, crtc->config);
1694
Keith Packard417e8222011-11-01 19:54:11 -07001695 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001696 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001697 *
1698 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001699 * SNB CPU
1700 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001701 * CPT PCH
1702 *
1703 * IBX PCH and CPU are the same for almost everything,
1704 * except that the CPU DP PLL is configured in this
1705 * register
1706 *
1707 * CPT PCH is quite different, having many bits moved
1708 * to the TRANS_DP_CTL register instead. That
1709 * configuration happens (oddly) in ironlake_pch_enable
1710 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001711
Keith Packard417e8222011-11-01 19:54:11 -07001712 /* Preserve the BIOS-computed detected bit. This is
1713 * supposed to be read-only.
1714 */
1715 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001716
Keith Packard417e8222011-11-01 19:54:11 -07001717 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001718 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001719 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001720
Keith Packard417e8222011-11-01 19:54:11 -07001721 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001722
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001723 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001724 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1725 intel_dp->DP |= DP_SYNC_HS_HIGH;
1726 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1727 intel_dp->DP |= DP_SYNC_VS_HIGH;
1728 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1729
Jani Nikula6aba5b62013-10-04 15:08:10 +03001730 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001731 intel_dp->DP |= DP_ENHANCED_FRAMING;
1732
Daniel Vetter7c62a162013-06-01 17:16:20 +02001733 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001734 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001735 u32 trans_dp;
1736
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001737 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001738
1739 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1740 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1741 trans_dp |= TRANS_DP_ENH_FRAMING;
1742 else
1743 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1744 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001745 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001746 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
Wayne Boyer666a4532015-12-09 12:29:35 -08001747 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001748 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001749
1750 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1751 intel_dp->DP |= DP_SYNC_HS_HIGH;
1752 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1753 intel_dp->DP |= DP_SYNC_VS_HIGH;
1754 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1755
Jani Nikula6aba5b62013-10-04 15:08:10 +03001756 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001757 intel_dp->DP |= DP_ENHANCED_FRAMING;
1758
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001759 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001760 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001761 else if (crtc->pipe == PIPE_B)
1762 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001763 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001764}
1765
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001766#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1767#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001768
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001769#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1770#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001771
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001772#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1773#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001774
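/*
 * Poll the panel power sequencer status until (status & mask) == value,
 * complaining if it hasn't settled within 5 seconds.
 */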
Daniel Vetter4be73782014-01-17 14:39:48 +01001775static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001776 u32 mask,
1777 u32 value)
1778{
Paulo Zanoni30add222012-10-26 19:05:45 -02001779 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001780 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001781 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jesse Barnes453c5422013-03-28 09:55:41 -07001782
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001783 lockdep_assert_held(&dev_priv->pps_mutex);
1784
Jani Nikulabf13e812013-09-06 07:40:05 +03001785 pp_stat_reg = _pp_stat_reg(intel_dp);
1786 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001787
1788 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001789 mask, value,
1790 I915_READ(pp_stat_reg),
1791 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001792
Jesse Barnes453c5422013-03-28 09:55:41 -07001793 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001794 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001795 I915_READ(pp_stat_reg),
1796 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001797 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001798
1799 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001800}
1801
Daniel Vetter4be73782014-01-17 14:39:48 +01001802static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001803{
1804 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001805 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001806}
1807
Daniel Vetter4be73782014-01-17 14:39:48 +01001808static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001809{
Keith Packardbd943152011-09-18 23:09:52 -07001810 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001811 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001812}
Keith Packardbd943152011-09-18 23:09:52 -07001813
Daniel Vetter4be73782014-01-17 14:39:48 +01001814static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001815{
1816 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001817
 1818	/* When we disable the VDD override bit last, we have to do the
 1819	 * manual wait. */
1820 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1821 intel_dp->panel_power_cycle_delay);
1822
Daniel Vetter4be73782014-01-17 14:39:48 +01001823 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001824}
Keith Packardbd943152011-09-18 23:09:52 -07001825
Daniel Vetter4be73782014-01-17 14:39:48 +01001826static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001827{
1828 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1829 intel_dp->backlight_on_delay);
1830}
1831
Daniel Vetter4be73782014-01-17 14:39:48 +01001832static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001833{
1834 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1835 intel_dp->backlight_off_delay);
1836}
Keith Packard99ea7122011-11-01 19:57:50 -07001837
Keith Packard832dd3c2011-11-01 19:34:06 -07001838/* Read the current pp_control value, unlocking the register if it
1839 * is locked
1840 */
1841
Jesse Barnes453c5422013-03-28 09:55:41 -07001842static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001843{
Jesse Barnes453c5422013-03-28 09:55:41 -07001844 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1845 struct drm_i915_private *dev_priv = dev->dev_private;
1846 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001847
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001848 lockdep_assert_held(&dev_priv->pps_mutex);
1849
Jani Nikulabf13e812013-09-06 07:40:05 +03001850 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301851 if (!IS_BROXTON(dev)) {
1852 control &= ~PANEL_UNLOCK_MASK;
1853 control |= PANEL_UNLOCK_REGS;
1854 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001855 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001856}
1857
Ville Syrjälä951468f2014-09-04 14:55:31 +03001858/*
1859 * Must be paired with edp_panel_vdd_off().
1860 * Must hold pps_mutex around the whole on/off sequence.
1861 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1862 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001863static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001864{
Paulo Zanoni30add222012-10-26 19:05:45 -02001865 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001866 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1867 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001868 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001869 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001870 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001871 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001872 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001873
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001874 lockdep_assert_held(&dev_priv->pps_mutex);
1875
Keith Packard97af61f572011-09-28 16:23:51 -07001876 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001877 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001878
Egbert Eich2c623c12014-11-25 12:54:57 +01001879 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001880 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001881
Daniel Vetter4be73782014-01-17 14:39:48 +01001882 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001883 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001884
Ville Syrjälä25f78f52015-11-16 15:01:04 +01001885 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001886 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001887
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001888 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1889 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001890
Daniel Vetter4be73782014-01-17 14:39:48 +01001891 if (!edp_have_panel_power(intel_dp))
1892 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001893
Jesse Barnes453c5422013-03-28 09:55:41 -07001894 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001895 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001896
Jani Nikulabf13e812013-09-06 07:40:05 +03001897 pp_stat_reg = _pp_stat_reg(intel_dp);
1898 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001899
1900 I915_WRITE(pp_ctrl_reg, pp);
1901 POSTING_READ(pp_ctrl_reg);
1902 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1903 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001904 /*
1905 * If the panel wasn't on, delay before accessing aux channel
1906 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001907 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001908 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1909 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001910 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001911 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001912
1913 return need_to_disable;
1914}
1915
Ville Syrjälä951468f2014-09-04 14:55:31 +03001916/*
1917 * Must be paired with intel_edp_panel_vdd_off() or
1918 * intel_edp_panel_off().
1919 * Nested calls to these functions are not allowed since
1920 * we drop the lock. Caller must use some higher level
1921 * locking to prevent nested calls from other threads.
1922 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001923void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001924{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001925 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001926
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001927 if (!is_edp(intel_dp))
1928 return;
1929
Ville Syrjälä773538e82014-09-04 14:54:56 +03001930 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001931 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001932 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001933
Rob Clarke2c719b2014-12-15 13:56:32 -05001934 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001935 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001936}
1937
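/*
 * Actually drop VDD: clear EDP_FORCE_VDD and release the AUX power domain
 * reference taken by edp_panel_vdd_on(). Only valid once nobody wants VDD.
 */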
Daniel Vetter4be73782014-01-17 14:39:48 +01001938static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001939{
Paulo Zanoni30add222012-10-26 19:05:45 -02001940 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001941 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001942 struct intel_digital_port *intel_dig_port =
1943 dp_to_dig_port(intel_dp);
1944 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1945 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001946 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001947 i915_reg_t pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001948
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001949 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001950
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001951 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001952
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001953 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001954 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001955
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001956 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1957 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001958
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001959 pp = ironlake_get_pp_control(intel_dp);
1960 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001961
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001962 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1963 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001964
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001967
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001968 /* Make sure sequencer is idle before allowing subsequent activity */
1969 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1970 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001971
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001972 if ((pp & POWER_TARGET_ON) == 0)
1973 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001974
Ville Syrjälä25f78f52015-11-16 15:01:04 +01001975 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001976 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001977}
1978
Daniel Vetter4be73782014-01-17 14:39:48 +01001979static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001980{
1981 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1982 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001983
Ville Syrjälä773538e82014-09-04 14:54:56 +03001984 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001985 if (!intel_dp->want_panel_vdd)
1986 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001987 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001988}
1989
Imre Deakaba86892014-07-30 15:57:31 +03001990static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1991{
1992 unsigned long delay;
1993
1994 /*
1995 * Queue the timer to fire a long time from now (relative to the power
1996 * down delay) to keep the panel power up across a sequence of
1997 * operations.
1998 */
1999 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2000 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2001}
2002
Ville Syrjälä951468f2014-09-04 14:55:31 +03002003/*
2004 * Must be paired with edp_panel_vdd_on().
2005 * Must hold pps_mutex around the whole on/off sequence.
2006 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2007 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002008static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07002009{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002010 struct drm_i915_private *dev_priv =
2011 intel_dp_to_dev(intel_dp)->dev_private;
2012
2013 lockdep_assert_held(&dev_priv->pps_mutex);
2014
Keith Packard97af61f572011-09-28 16:23:51 -07002015 if (!is_edp(intel_dp))
2016 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08002017
Rob Clarke2c719b2014-12-15 13:56:32 -05002018 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002019 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07002020
Keith Packardbd943152011-09-18 23:09:52 -07002021 intel_dp->want_panel_vdd = false;
2022
Imre Deakaba86892014-07-30 15:57:31 +03002023 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01002024 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03002025 else
2026 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08002027}
2028
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002029static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07002030{
Paulo Zanoni30add222012-10-26 19:05:45 -02002031 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002032 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07002033 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002034 i915_reg_t pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002035
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002036 lockdep_assert_held(&dev_priv->pps_mutex);
2037
Keith Packard97af61f572011-09-28 16:23:51 -07002038 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07002039 return;
Keith Packard99ea7122011-11-01 19:57:50 -07002040
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002041 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2042 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07002043
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03002044 if (WARN(edp_have_panel_power(intel_dp),
2045 "eDP port %c panel power already on\n",
2046 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002047 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07002048
Daniel Vetter4be73782014-01-17 14:39:48 +01002049 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002050
Jani Nikulabf13e812013-09-06 07:40:05 +03002051 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002052 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07002053 if (IS_GEN5(dev)) {
2054 /* ILK workaround: disable reset around power sequence */
2055 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03002056 I915_WRITE(pp_ctrl_reg, pp);
2057 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07002058 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002059
Keith Packard1c0ae802011-09-19 13:59:29 -07002060 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07002061 if (!IS_GEN5(dev))
2062 pp |= PANEL_POWER_RESET;
2063
Jesse Barnes453c5422013-03-28 09:55:41 -07002064 I915_WRITE(pp_ctrl_reg, pp);
2065 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002066
Daniel Vetter4be73782014-01-17 14:39:48 +01002067 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02002068 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07002069
Keith Packard05ce1a42011-09-29 16:33:01 -07002070 if (IS_GEN5(dev)) {
2071 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03002072 I915_WRITE(pp_ctrl_reg, pp);
2073 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07002074 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002075}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002076
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002077void intel_edp_panel_on(struct intel_dp *intel_dp)
2078{
2079 if (!is_edp(intel_dp))
2080 return;
2081
2082 pps_lock(intel_dp);
2083 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002084 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002085}
2086
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002087
2088static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07002089{
Imre Deak4e6e1a52014-03-27 17:45:11 +02002090 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2091 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02002092 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002093 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02002094 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002095 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002096 i915_reg_t pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002097
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002098 lockdep_assert_held(&dev_priv->pps_mutex);
2099
Keith Packard97af61f572011-09-28 16:23:51 -07002100 if (!is_edp(intel_dp))
2101 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002102
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002103 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2104 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002105
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002106 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2107 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002108
Jesse Barnes453c5422013-03-28 09:55:41 -07002109 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002110	/* We need to switch off panel power _and_ force VDD, as otherwise some
2111 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002112 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2113 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002114
Jani Nikulabf13e812013-09-06 07:40:05 +03002115 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002116
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002117 intel_dp->want_panel_vdd = false;
2118
Jesse Barnes453c5422013-03-28 09:55:41 -07002119 I915_WRITE(pp_ctrl_reg, pp);
2120 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002121
Paulo Zanonidce56b32013-12-19 14:29:40 -02002122 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002123 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002124
2125 /* We got a reference when we enabled the VDD. */
Ville Syrjälä25f78f52015-11-16 15:01:04 +01002126 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak4e6e1a52014-03-27 17:45:11 +02002127 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002128}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002129
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002130void intel_edp_panel_off(struct intel_dp *intel_dp)
2131{
2132 if (!is_edp(intel_dp))
2133 return;
2134
2135 pps_lock(intel_dp);
2136 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002137 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002138}
2139
Jani Nikula1250d102014-08-12 17:11:39 +03002140/* Enable backlight in the panel power control. */
2141static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002142{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002143 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2144 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002145 struct drm_i915_private *dev_priv = dev->dev_private;
2146 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002147 i915_reg_t pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002148
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002149 /*
2150 * If we enable the backlight right away following a panel power
2151 * on, we may see slight flicker as the panel syncs with the eDP
2152 * link. So delay a bit to make sure the image is solid before
2153 * allowing it to appear.
2154 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002155 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002156
Ville Syrjälä773538e82014-09-04 14:54:56 +03002157 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002158
Jesse Barnes453c5422013-03-28 09:55:41 -07002159 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002160 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002161
Jani Nikulabf13e812013-09-06 07:40:05 +03002162 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002163
2164 I915_WRITE(pp_ctrl_reg, pp);
2165 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002166
Ville Syrjälä773538e82014-09-04 14:54:56 +03002167 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002168}
2169
Jani Nikula1250d102014-08-12 17:11:39 +03002170/* Enable backlight PWM and backlight PP control. */
2171void intel_edp_backlight_on(struct intel_dp *intel_dp)
2172{
2173 if (!is_edp(intel_dp))
2174 return;
2175
2176 DRM_DEBUG_KMS("\n");
2177
2178 intel_panel_enable_backlight(intel_dp->attached_connector);
2179 _intel_edp_backlight_on(intel_dp);
2180}
2181
2182/* Disable backlight in the panel power control. */
2183static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002184{
Paulo Zanoni30add222012-10-26 19:05:45 -02002185 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002186 struct drm_i915_private *dev_priv = dev->dev_private;
2187 u32 pp;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002188 i915_reg_t pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002189
Keith Packardf01eca22011-09-28 16:48:10 -07002190 if (!is_edp(intel_dp))
2191 return;
2192
Ville Syrjälä773538e82014-09-04 14:54:56 +03002193 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002194
Jesse Barnes453c5422013-03-28 09:55:41 -07002195 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002196 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002197
Jani Nikulabf13e812013-09-06 07:40:05 +03002198 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002199
2200 I915_WRITE(pp_ctrl_reg, pp);
2201 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002202
Ville Syrjälä773538e82014-09-04 14:54:56 +03002203 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002204
Paulo Zanonidce56b32013-12-19 14:29:40 -02002205 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002206 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002207}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002208
Jani Nikula1250d102014-08-12 17:11:39 +03002209/* Disable backlight PP control and backlight PWM. */
2210void intel_edp_backlight_off(struct intel_dp *intel_dp)
2211{
2212 if (!is_edp(intel_dp))
2213 return;
2214
2215 DRM_DEBUG_KMS("\n");
2216
2217 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002218 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002219}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002220
Jani Nikula73580fb72014-08-12 17:11:41 +03002221/*
2222 * Hook for controlling the panel power control backlight through the bl_power
2223 * sysfs attribute. Take care to handle multiple calls.
2224 */
2225static void intel_edp_backlight_power(struct intel_connector *connector,
2226 bool enable)
2227{
2228 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002229 bool is_enabled;
2230
Ville Syrjälä773538e82014-09-04 14:54:56 +03002231 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002232 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002233 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002234
2235 if (is_enabled == enable)
2236 return;
2237
Jani Nikula23ba9372014-08-27 14:08:43 +03002238 DRM_DEBUG_KMS("panel power control backlight %s\n",
2239 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002240
2241 if (enable)
2242 _intel_edp_backlight_on(intel_dp);
2243 else
2244 _intel_edp_backlight_off(intel_dp);
2245}
2246
Ville Syrjälä64e10772015-10-29 21:26:01 +02002247static const char *state_string(bool enabled)
2248{
2249 return enabled ? "on" : "off";
2250}
2251
2252static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2253{
2254 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2255 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2256 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2257
2258 I915_STATE_WARN(cur_state != state,
2259 "DP port %c state assertion failure (expected %s, current %s)\n",
2260 port_name(dig_port->port),
2261 state_string(state), state_string(cur_state));
2262}
2263#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2264
2265static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2266{
2267 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2268
2269 I915_STATE_WARN(cur_state != state,
2270 "eDP PLL state assertion failure (expected %s, current %s)\n",
2271 state_string(state), state_string(cur_state));
2272}
2273#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2274#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2275
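/*
 * The CPU eDP PLL (in DP_A) may only be touched with the pipe and DP port
 * disabled; select 162 vs 270 MHz from the crtc state and switch it on.
 */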
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002276static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002277{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002278 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002279 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2280 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002281
Ville Syrjälä64e10772015-10-29 21:26:01 +02002282 assert_pipe_disabled(dev_priv, crtc->pipe);
2283 assert_dp_port_disabled(intel_dp);
2284 assert_edp_pll_disabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002285
Ville Syrjäläabfce942015-10-29 21:26:03 +02002286 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2287 crtc->config->port_clock);
2288
2289 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2290
2291 if (crtc->config->port_clock == 162000)
2292 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2293 else
2294 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2295
2296 I915_WRITE(DP_A, intel_dp->DP);
2297 POSTING_READ(DP_A);
2298 udelay(500);
2299
Daniel Vetter07679352012-09-06 22:15:42 +02002300 intel_dp->DP |= DP_PLL_ENABLE;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002301
Daniel Vetter07679352012-09-06 22:15:42 +02002302 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002303 POSTING_READ(DP_A);
2304 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002305}
2306
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002307static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002308{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002310 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2311 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002312
Ville Syrjälä64e10772015-10-29 21:26:01 +02002313 assert_pipe_disabled(dev_priv, crtc->pipe);
2314 assert_dp_port_disabled(intel_dp);
2315 assert_edp_pll_enabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002316
Ville Syrjäläabfce942015-10-29 21:26:03 +02002317 DRM_DEBUG_KMS("disabling eDP PLL\n");
2318
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002319 intel_dp->DP &= ~DP_PLL_ENABLE;
Daniel Vetter07679352012-09-06 22:15:42 +02002320
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002321 I915_WRITE(DP_A, intel_dp->DP);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002322 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002323 udelay(200);
2324}
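
/*
 * Note: on the ILK-style CPU eDP path these PLL helpers only ever run for
 * port A. They are bracketed by the encoder hooks further down:
 * g4x_pre_enable_dp() turns the eDP PLL on before the port is enabled,
 * and ilk_post_disable_dp() turns it off again after the link is taken
 * down.
 */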
2325
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002326/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002327void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002328{
2329 int ret, i;
2330
2331 /* Should have a valid DPCD by this point */
2332 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2333 return;
2334
2335 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002336 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2337 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002338 } else {
2339 /*
2340	 * When turning on, the sink may need up to 1ms to wake up,
2341	 * so retry the write a few times before giving up.
2342 */
2343 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002344 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2345 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002346 if (ret == 1)
2347 break;
2348 msleep(1);
2349 }
2350 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002351
2352 if (ret != 1)
2353 DRM_DEBUG_KMS("failed to %s sink power state\n",
2354 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002355}
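
/*
 * Callers in this file pair the sink power state with the source state:
 * intel_disable_dp() puts the sink into D3 before the panel is turned off,
 * and intel_enable_dp() wakes it to D0 right before link training starts,
 * relying on the retry loop above to cover the sink's wake-up time.
 */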
2356
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002357static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2358 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002359{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002360 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002361 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002362 struct drm_device *dev = encoder->base.dev;
2363 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002364 enum intel_display_power_domain power_domain;
2365 u32 tmp;
2366
2367 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002368 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002369 return false;
2370
2371 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002372
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002373 if (!(tmp & DP_PORT_EN))
2374 return false;
2375
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002376 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002377 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002378 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002379 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002380
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002381 for_each_pipe(dev_priv, p) {
2382 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2383 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2384 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002385 return true;
2386 }
2387 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002388
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002389 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002390 i915_mmio_reg_offset(intel_dp->output_reg));
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002391 } else if (IS_CHERRYVIEW(dev)) {
2392 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2393 } else {
2394 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002395 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002396
2397 return true;
2398}
2399
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002400static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002401 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002402{
2403 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002404 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002405 struct drm_device *dev = encoder->base.dev;
2406 struct drm_i915_private *dev_priv = dev->dev_private;
2407 enum port port = dp_to_dig_port(intel_dp)->port;
2408 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002409 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002410
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002411 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002412
2413 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002414
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002415 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002416 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2417
2418 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002419 flags |= DRM_MODE_FLAG_PHSYNC;
2420 else
2421 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002422
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002423 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002424 flags |= DRM_MODE_FLAG_PVSYNC;
2425 else
2426 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002427 } else {
2428 if (tmp & DP_SYNC_HS_HIGH)
2429 flags |= DRM_MODE_FLAG_PHSYNC;
2430 else
2431 flags |= DRM_MODE_FLAG_NHSYNC;
2432
2433 if (tmp & DP_SYNC_VS_HIGH)
2434 flags |= DRM_MODE_FLAG_PVSYNC;
2435 else
2436 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002437 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002438
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002439 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002440
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002441 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
Wayne Boyer666a4532015-12-09 12:29:35 -08002442 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002443 pipe_config->limited_color_range = true;
2444
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002445 pipe_config->has_dp_encoder = true;
2446
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002447 pipe_config->lane_count =
2448 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2449
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002450 intel_dp_get_m_n(crtc, pipe_config);
2451
Ville Syrjälä18442d02013-09-13 16:00:08 +03002452 if (port == PORT_A) {
Ville Syrjäläb377e0d2015-10-29 21:25:59 +02002453 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002454 pipe_config->port_clock = 162000;
2455 else
2456 pipe_config->port_clock = 270000;
2457 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002458
2459 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2460 &pipe_config->dp_m_n);
2461
2462 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2463 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2464
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002465 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002466
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002467 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2468 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2469 /*
2470 * This is a big fat ugly hack.
2471 *
2472 * Some machines in UEFI boot mode provide us a VBT that has 18
2473 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2474 * unknown we fail to light up. Yet the same BIOS boots up with
2475 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2476 * max, not what it tells us to use.
2477 *
2478 * Note: This will still be broken if the eDP panel is not lit
2479 * up by the BIOS, and thus we can't get the mode at module
2480 * load.
2481 */
2482 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2483 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2484 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2485 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002486}
2487
Daniel Vettere8cb4552012-07-01 13:05:48 +02002488static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002489{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002490 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002491 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002492 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2493
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002494 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002495 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002496
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002497 if (HAS_PSR(dev) && !HAS_DDI(dev))
2498 intel_psr_disable(intel_dp);
2499
Daniel Vetter6cb49832012-05-20 17:14:50 +02002500 /* Make sure the panel is off before trying to change the mode. But also
2501 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002502 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002503 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002504 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002505 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002506
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002507 /* disable the port before the pipe on g4x */
2508 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002509 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002510}
2511
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002512static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002513{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002514 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002515 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002516
Ville Syrjälä49277c32014-03-31 18:21:26 +03002517 intel_dp_link_down(intel_dp);
Ville Syrjäläabfce942015-10-29 21:26:03 +02002518
2519 /* Only ilk+ has port A */
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002520 if (port == PORT_A)
2521 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002522}
2523
2524static void vlv_post_disable_dp(struct intel_encoder *encoder)
2525{
2526 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527
2528 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002529}
2530
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002531static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2532 bool reset)
2533{
2534 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2535 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2536 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2537 enum pipe pipe = crtc->pipe;
2538 uint32_t val;
2539
2540 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2541 if (reset)
2542 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2543 else
2544 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2545 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2546
2547 if (crtc->config->lane_count > 2) {
2548 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2549 if (reset)
2550 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2551 else
2552 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2553 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2554 }
2555
2556 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2557 val |= CHV_PCS_REQ_SOFTRESET_EN;
2558 if (reset)
2559 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2560 else
2561 val |= DPIO_PCS_CLK_SOFT_RESET;
2562 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2563
2564 if (crtc->config->lane_count > 2) {
2565 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2566 val |= CHV_PCS_REQ_SOFTRESET_EN;
2567 if (reset)
2568 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2569 else
2570 val |= DPIO_PCS_CLK_SOFT_RESET;
2571 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2572 }
2573}
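
/*
 * The lane soft reset above is asserted while the lanes are being
 * reconfigured (chv_post_disable_dp() and chv_dp_pre_pll_enable()) and
 * deasserted from chv_pre_enable_dp() once the lane programming is done.
 */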
2574
Ville Syrjälä580d3812014-04-09 13:29:00 +03002575static void chv_post_disable_dp(struct intel_encoder *encoder)
2576{
2577 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002578 struct drm_device *dev = encoder->base.dev;
2579 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002580
2581 intel_dp_link_down(intel_dp);
2582
Ville Syrjäläa5805162015-05-26 20:42:30 +03002583 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002584
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002585 /* Assert data lane reset */
2586 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002587
Ville Syrjäläa5805162015-05-26 20:42:30 +03002588 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002589}
2590
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002591static void
2592_intel_dp_set_link_train(struct intel_dp *intel_dp,
2593 uint32_t *DP,
2594 uint8_t dp_train_pat)
2595{
2596 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2597 struct drm_device *dev = intel_dig_port->base.base.dev;
2598 struct drm_i915_private *dev_priv = dev->dev_private;
2599 enum port port = intel_dig_port->port;
2600
2601 if (HAS_DDI(dev)) {
2602 uint32_t temp = I915_READ(DP_TP_CTL(port));
2603
2604 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2605 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2606 else
2607 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2608
2609 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2610 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2611 case DP_TRAINING_PATTERN_DISABLE:
2612 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2613
2614 break;
2615 case DP_TRAINING_PATTERN_1:
2616 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2617 break;
2618 case DP_TRAINING_PATTERN_2:
2619 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2620 break;
2621 case DP_TRAINING_PATTERN_3:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2623 break;
2624 }
2625 I915_WRITE(DP_TP_CTL(port), temp);
2626
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002627 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2628 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002629 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2630
2631 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2632 case DP_TRAINING_PATTERN_DISABLE:
2633 *DP |= DP_LINK_TRAIN_OFF_CPT;
2634 break;
2635 case DP_TRAINING_PATTERN_1:
2636 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2637 break;
2638 case DP_TRAINING_PATTERN_2:
2639 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2640 break;
2641 case DP_TRAINING_PATTERN_3:
2642 DRM_ERROR("DP training pattern 3 not supported\n");
2643 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2644 break;
2645 }
2646
2647 } else {
2648 if (IS_CHERRYVIEW(dev))
2649 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2650 else
2651 *DP &= ~DP_LINK_TRAIN_MASK;
2652
2653 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2654 case DP_TRAINING_PATTERN_DISABLE:
2655 *DP |= DP_LINK_TRAIN_OFF;
2656 break;
2657 case DP_TRAINING_PATTERN_1:
2658 *DP |= DP_LINK_TRAIN_PAT_1;
2659 break;
2660 case DP_TRAINING_PATTERN_2:
2661 *DP |= DP_LINK_TRAIN_PAT_2;
2662 break;
2663 case DP_TRAINING_PATTERN_3:
2664 if (IS_CHERRYVIEW(dev)) {
2665 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2666 } else {
2667 DRM_ERROR("DP training pattern 3 not supported\n");
2668 *DP |= DP_LINK_TRAIN_PAT_2;
2669 }
2670 break;
2671 }
2672 }
2673}
2674
2675static void intel_dp_enable_port(struct intel_dp *intel_dp)
2676{
2677 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2678 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002679 struct intel_crtc *crtc =
2680 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002681
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002682 /* enable with pattern 1 (as per spec) */
2683 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2684 DP_TRAINING_PATTERN_1);
2685
2686 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2687 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002688
2689 /*
2690 * Magic for VLV/CHV. We _must_ first set up the register
2691 * without actually enabling the port, and then do another
2692 * write to enable the port. Otherwise link training will
2693 * fail when the power sequencer is freshly used for this port.
2694 */
2695 intel_dp->DP |= DP_PORT_EN;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002696 if (crtc->config->has_audio)
2697 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002698
2699 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2700 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002701}
2702
Daniel Vettere8cb4552012-07-01 13:05:48 +02002703static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002704{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002705 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2706 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002707 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002708 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002709 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002710 enum port port = dp_to_dig_port(intel_dp)->port;
2711 enum pipe pipe = crtc->pipe;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002712
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002713 if (WARN_ON(dp_reg & DP_PORT_EN))
2714 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002715
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002716 pps_lock(intel_dp);
2717
Wayne Boyer666a4532015-12-09 12:29:35 -08002718 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002719 vlv_init_panel_power_sequencer(intel_dp);
2720
Ville Syrjälä78645782015-11-20 22:09:19 +02002721 /*
2722 * We get an occasional spurious underrun between the port
2723 * enable and vdd enable, when enabling port A eDP.
2724 *
2725 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2726 */
2727 if (port == PORT_A)
2728 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2729
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002730 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002731
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002732 if (port == PORT_A && IS_GEN5(dev_priv)) {
2733 /*
2734 * Underrun reporting for the other pipe was disabled in
2735 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2736 * enabled, so it's now safe to re-enable underrun reporting.
2737 */
2738 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2739 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2740 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2741 }
2742
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002743 edp_panel_vdd_on(intel_dp);
2744 edp_panel_on(intel_dp);
2745 edp_panel_vdd_off(intel_dp, true);
2746
Ville Syrjälä78645782015-11-20 22:09:19 +02002747 if (port == PORT_A)
2748 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2749
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002750 pps_unlock(intel_dp);
2751
Wayne Boyer666a4532015-12-09 12:29:35 -08002752 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002753 unsigned int lane_mask = 0x0;
2754
2755 if (IS_CHERRYVIEW(dev))
2756 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2757
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002758 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2759 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002760 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002761
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002762 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2763 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002764 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002765
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002766 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002767 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002768 pipe_name(pipe));
Jani Nikulac1dec792014-10-27 16:26:56 +02002769 intel_audio_codec_enable(encoder);
2770 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002771}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002772
Jani Nikulaecff4f32013-09-06 07:38:29 +03002773static void g4x_enable_dp(struct intel_encoder *encoder)
2774{
Jani Nikula828f5c62013-09-05 16:44:45 +03002775 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2776
Jani Nikulaecff4f32013-09-06 07:38:29 +03002777 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002778 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002779}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002780
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002781static void vlv_enable_dp(struct intel_encoder *encoder)
2782{
Jani Nikula828f5c62013-09-05 16:44:45 +03002783 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2784
Daniel Vetter4be73782014-01-17 14:39:48 +01002785 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002786 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002787}
2788
Jani Nikulaecff4f32013-09-06 07:38:29 +03002789static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002790{
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002791 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002792 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002793 enum port port = dp_to_dig_port(intel_dp)->port;
2794 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002795
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002796 intel_dp_prepare(encoder);
2797
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002798 if (port == PORT_A && IS_GEN5(dev_priv)) {
2799 /*
2800 * We get FIFO underruns on the other pipe when
2801 * enabling the CPU eDP PLL, and when enabling CPU
2802 * eDP port. We could potentially avoid the PLL
2803 * underrun with a vblank wait just prior to enabling
2804 * the PLL, but that doesn't appear to help the port
2805 * enable case. Just sweep it all under the rug.
2806 */
2807 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2808 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2809 }
2810
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002811 /* Only ilk+ has port A */
Ville Syrjäläabfce942015-10-29 21:26:03 +02002812 if (port == PORT_A)
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002813 ironlake_edp_pll_on(intel_dp);
2814}
2815
Ville Syrjälä83b84592014-10-16 21:29:51 +03002816static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2817{
2818 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2819 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2820 enum pipe pipe = intel_dp->pps_pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02002821 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
Ville Syrjälä83b84592014-10-16 21:29:51 +03002822
2823 edp_panel_vdd_off_sync(intel_dp);
2824
2825 /*
2826	 * VLV seems to get confused when multiple power sequencers
2827	 * have the same port selected (even if only one has power/vdd
2828	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2829	 * CHV, on the other hand, doesn't seem to mind having the same port
2830	 * selected in multiple power sequencers, but let's always clear the
2831	 * port select when logically disconnecting a power sequencer
2832 * from a port.
2833 */
2834 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2835 pipe_name(pipe), port_name(intel_dig_port->port));
2836 I915_WRITE(pp_on_reg, 0);
2837 POSTING_READ(pp_on_reg);
2838
2839 intel_dp->pps_pipe = INVALID_PIPE;
2840}
2841
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002842static void vlv_steal_power_sequencer(struct drm_device *dev,
2843 enum pipe pipe)
2844{
2845 struct drm_i915_private *dev_priv = dev->dev_private;
2846 struct intel_encoder *encoder;
2847
2848 lockdep_assert_held(&dev_priv->pps_mutex);
2849
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002850 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2851 return;
2852
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002853 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2854 base.head) {
2855 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002856 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002857
2858 if (encoder->type != INTEL_OUTPUT_EDP)
2859 continue;
2860
2861 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002862 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002863
2864 if (intel_dp->pps_pipe != pipe)
2865 continue;
2866
2867 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002868 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002869
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002870 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002871 "stealing pipe %c power sequencer from active eDP port %c\n",
2872 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002873
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002874 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002875 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002876 }
2877}
2878
2879static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2880{
2881 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2882 struct intel_encoder *encoder = &intel_dig_port->base;
2883 struct drm_device *dev = encoder->base.dev;
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002886
2887 lockdep_assert_held(&dev_priv->pps_mutex);
2888
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002889 if (!is_edp(intel_dp))
2890 return;
2891
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002892 if (intel_dp->pps_pipe == crtc->pipe)
2893 return;
2894
2895 /*
2896 * If another power sequencer was being used on this
2897 * port previously make sure to turn off vdd there while
2898 * we still have control of it.
2899 */
2900 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002901 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002902
2903 /*
2904 * We may be stealing the power
2905 * sequencer from another port.
2906 */
2907 vlv_steal_power_sequencer(dev, crtc->pipe);
2908
2909 /* now it's all ours */
2910 intel_dp->pps_pipe = crtc->pipe;
2911
2912 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2913 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2914
2915 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002916 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2917 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002918}
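
/*
 * To summarize the handover above: any power sequencer this port was
 * previously using is detached first, the target pipe's sequencer is then
 * stolen back from whichever other eDP port may still own it, and finally
 * the sequencer is claimed and reprogrammed for this port.
 */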
2919
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002920static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2921{
2922 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2923 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002924 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002925 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002926 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002927 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002928 int pipe = intel_crtc->pipe;
2929 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002930
Ville Syrjäläa5805162015-05-26 20:42:30 +03002931 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002932
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002933 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002934 val = 0;
2935 if (pipe)
2936 val |= (1<<21);
2937 else
2938 val &= ~(1<<21);
2939 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002940 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2941 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2942 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002943
Ville Syrjäläa5805162015-05-26 20:42:30 +03002944 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002945
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002946 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002947}
2948
Jani Nikulaecff4f32013-09-06 07:38:29 +03002949static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002950{
2951 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2952 struct drm_device *dev = encoder->base.dev;
2953 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002954 struct intel_crtc *intel_crtc =
2955 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002956 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002957 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002958
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002959 intel_dp_prepare(encoder);
2960
Jesse Barnes89b667f2013-04-18 14:51:36 -07002961 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002962 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002963 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002964 DPIO_PCS_TX_LANE2_RESET |
2965 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002966 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002967 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2968 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2969 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2970 DPIO_PCS_CLK_SOFT_RESET);
2971
2972 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002973 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2974 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2975 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002976 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002977}
2978
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002979static void chv_pre_enable_dp(struct intel_encoder *encoder)
2980{
2981 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2982 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2983 struct drm_device *dev = encoder->base.dev;
2984 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002985 struct intel_crtc *intel_crtc =
2986 to_intel_crtc(encoder->base.crtc);
2987 enum dpio_channel ch = vlv_dport_to_channel(dport);
2988 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002989 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002990 u32 val;
2991
Ville Syrjäläa5805162015-05-26 20:42:30 +03002992 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002993
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002994 /* allow hardware to manage TX FIFO reset source */
2995 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2996 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2997 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2998
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002999 if (intel_crtc->config->lane_count > 2) {
3000 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3001 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3002 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3003 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03003004
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003005	/* Program the optimal Tx lane latency setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003006 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003007 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003008 if (intel_crtc->config->lane_count == 1)
3009 data = 0x0;
3010 else
3011 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003012 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3013 data << DPIO_UPAR_SHIFT);
3014 }
3015
3016 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03003017 if (intel_crtc->config->port_clock > 270000)
3018 stagger = 0x18;
3019 else if (intel_crtc->config->port_clock > 135000)
3020 stagger = 0xd;
3021 else if (intel_crtc->config->port_clock > 67500)
3022 stagger = 0x7;
3023 else if (intel_crtc->config->port_clock > 33750)
3024 stagger = 0x4;
3025 else
3026 stagger = 0x2;
3027
3028 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3029 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3030 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3031
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003032 if (intel_crtc->config->lane_count > 2) {
3033 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3034 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3035 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3036 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03003037
3038 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3039 DPIO_LANESTAGGER_STRAP(stagger) |
3040 DPIO_LANESTAGGER_STRAP_OVRD |
3041 DPIO_TX1_STAGGER_MASK(0x1f) |
3042 DPIO_TX1_STAGGER_MULT(6) |
3043 DPIO_TX2_STAGGER_MULT(0));
3044
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003045 if (intel_crtc->config->lane_count > 2) {
3046 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3047 DPIO_LANESTAGGER_STRAP(stagger) |
3048 DPIO_LANESTAGGER_STRAP_OVRD |
3049 DPIO_TX1_STAGGER_MASK(0x1f) |
3050 DPIO_TX1_STAGGER_MULT(7) |
3051 DPIO_TX2_STAGGER_MULT(5));
3052 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003053
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03003054 /* Deassert data lane reset */
3055 chv_data_lane_soft_reset(encoder, false);
3056
Ville Syrjäläa5805162015-05-26 20:42:30 +03003057 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003058
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003059 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003060
3061 /* Second common lane will stay alive on its own now */
3062 if (dport->release_cl2_override) {
3063 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3064 dport->release_cl2_override = false;
3065 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003066}
3067
Ville Syrjälä9197c882014-04-09 13:29:05 +03003068static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3069{
3070 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3071 struct drm_device *dev = encoder->base.dev;
3072 struct drm_i915_private *dev_priv = dev->dev_private;
3073 struct intel_crtc *intel_crtc =
3074 to_intel_crtc(encoder->base.crtc);
3075 enum dpio_channel ch = vlv_dport_to_channel(dport);
3076 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003077 unsigned int lane_mask =
3078 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003079 u32 val;
3080
Ville Syrjälä625695f2014-06-28 02:04:02 +03003081 intel_dp_prepare(encoder);
3082
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003083 /*
3084 * Must trick the second common lane into life.
3085 * Otherwise we can't even access the PLL.
3086 */
3087 if (ch == DPIO_CH0 && pipe == PIPE_B)
3088 dport->release_cl2_override =
3089 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3090
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003091 chv_phy_powergate_lanes(encoder, true, lane_mask);
3092
Ville Syrjäläa5805162015-05-26 20:42:30 +03003093 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003094
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03003095 /* Assert data lane reset */
3096 chv_data_lane_soft_reset(encoder, true);
3097
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03003098 /* program left/right clock distribution */
3099 if (pipe != PIPE_B) {
3100 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3101 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3102 if (ch == DPIO_CH0)
3103 val |= CHV_BUFLEFTENA1_FORCE;
3104 if (ch == DPIO_CH1)
3105 val |= CHV_BUFRIGHTENA1_FORCE;
3106 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3107 } else {
3108 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3109 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3110 if (ch == DPIO_CH0)
3111 val |= CHV_BUFLEFTENA2_FORCE;
3112 if (ch == DPIO_CH1)
3113 val |= CHV_BUFRIGHTENA2_FORCE;
3114 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3115 }
3116
Ville Syrjälä9197c882014-04-09 13:29:05 +03003117 /* program clock channel usage */
3118 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3119 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3120 if (pipe != PIPE_B)
3121 val &= ~CHV_PCS_USEDCLKCHANNEL;
3122 else
3123 val |= CHV_PCS_USEDCLKCHANNEL;
3124 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3125
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003126 if (intel_crtc->config->lane_count > 2) {
3127 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3128 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3129 if (pipe != PIPE_B)
3130 val &= ~CHV_PCS_USEDCLKCHANNEL;
3131 else
3132 val |= CHV_PCS_USEDCLKCHANNEL;
3133 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3134 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03003135
3136 /*
3137	 * This is a bit weird since generally CL
3138 * matches the pipe, but here we need to
3139 * pick the CL based on the port.
3140 */
3141 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3142 if (pipe != PIPE_B)
3143 val &= ~CHV_CMN_USEDCLKCHANNEL;
3144 else
3145 val |= CHV_CMN_USEDCLKCHANNEL;
3146 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3147
Ville Syrjäläa5805162015-05-26 20:42:30 +03003148 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003149}
3150
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003151static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3152{
3153 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3154 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3155 u32 val;
3156
3157 mutex_lock(&dev_priv->sb_lock);
3158
3159 /* disable left/right clock distribution */
3160 if (pipe != PIPE_B) {
3161 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3162 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3163 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3164 } else {
3165 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3166 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3167 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3168 }
3169
3170 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003171
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003172 /*
3173 * Leave the power down bit cleared for at least one
3174	 * lane so that chv_phy_powergate_ch() will power
3175 * on something when the channel is otherwise unused.
3176 * When the port is off and the override is removed
3177 * the lanes power down anyway, so otherwise it doesn't
3178 * really matter what the state of power down bits is
3179 * after this.
3180 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003181 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003182}
3183
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003184/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003185 * Native read with retry for link status and receiver capability reads for
3186 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003187 *
3188 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3189 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003190 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003191static ssize_t
3192intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3193 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003194{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003195 ssize_t ret;
3196 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003197
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003198 /*
3199	 * Sometimes we just get the same incorrect byte repeated
3200	 * over the entire buffer. Doing just one throw-away read
3201	 * initially seems to "solve" it.
3202 */
3203 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3204
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003205 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003206 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3207 if (ret == size)
3208 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003209 msleep(1);
3210 }
3211
Jani Nikula9d1a1032014-03-14 16:51:15 +02003212 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003213}
3214
3215/*
3216 * Fetch AUX CH registers 0x202 - 0x207 which contain
3217 * link status information
3218 */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003219bool
Keith Packard93f62da2011-11-01 19:45:03 -07003220intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003221{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003222 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3223 DP_LANE0_1_STATUS,
3224 link_status,
3225 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003226}
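
/*
 * Illustrative sketch, not part of the driver: a caller would typically
 * feed the status bytes fetched above into the generic DRM helpers to
 * decide whether the link needs retraining. The function name below is
 * hypothetical; drm_dp_clock_recovery_ok() and drm_dp_channel_eq_ok()
 * are the standard drm_dp_helper routines.
 */
static bool
sketch_link_needs_retrain(struct intel_dp *intel_dp, int lane_count)
{
	uint8_t link_status[DP_LINK_STATUS_SIZE];

	/* If the sink can't even be read, assume the worst. */
	if (!intel_dp_get_link_status(intel_dp, link_status))
		return true;

	return !drm_dp_clock_recovery_ok(link_status, lane_count) ||
	       !drm_dp_channel_eq_ok(link_status, lane_count);
}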
3227
Paulo Zanoni11002442014-06-13 18:45:41 -03003228/* These are source-specific values. */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003229uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003230intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003231{
Paulo Zanoni30add222012-10-26 19:05:45 -02003232 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303233 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003234 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003235
Vandana Kannan93147262014-11-18 15:45:29 +05303236 if (IS_BROXTON(dev))
3237 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3238 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303239 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303240 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003241 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Wayne Boyer666a4532015-12-09 12:29:35 -08003242 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303243 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003244 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003246 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303247 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003248 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303249 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003250}
3251
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003252uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003253intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3254{
Paulo Zanoni30add222012-10-26 19:05:45 -02003255 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003256 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003257
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003258 if (INTEL_INFO(dev)->gen >= 9) {
3259 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3261 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3263 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003268 default:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3270 }
3271 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003272 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3274 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003280 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303281 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003282 }
Wayne Boyer666a4532015-12-09 12:29:35 -08003283 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003284 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3286 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3288 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3290 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003292 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303293 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003294 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003295 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003296 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3298 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3301 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003302 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303303 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003304 }
3305 } else {
3306 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3308 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3310 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3312 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003314 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303315 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003316 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003317 }
3318}
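
/*
 * Illustrative sketch, not part of the driver: the gen9 branch of
 * intel_dp_pre_emphasis_max() above is a plain "more swing allows less
 * pre-emphasis" mapping and could equally be written as a lookup table.
 * The function name below is hypothetical.
 */
static uint8_t sketch_gen9_max_pre_emph(uint8_t voltage_swing)
{
	static const uint8_t max_pe[] = {
		DP_TRAIN_PRE_EMPH_LEVEL_3, /* swing level 0 */
		DP_TRAIN_PRE_EMPH_LEVEL_2, /* swing level 1 */
		DP_TRAIN_PRE_EMPH_LEVEL_1, /* swing level 2 */
		DP_TRAIN_PRE_EMPH_LEVEL_0, /* swing level 3 */
	};

	return max_pe[voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK];
}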
3319
Daniel Vetter5829975c2015-04-16 11:36:52 +02003320static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003321{
3322 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3323 struct drm_i915_private *dev_priv = dev->dev_private;
3324 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003325 struct intel_crtc *intel_crtc =
3326 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003327 unsigned long demph_reg_value, preemph_reg_value,
3328 uniqtranscale_reg_value;
3329 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003330 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003331 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003332
3333 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303334 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003335 preemph_reg_value = 0x0004000;
3336 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003338 demph_reg_value = 0x2B405555;
3339 uniqtranscale_reg_value = 0x552AB83A;
3340 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003342 demph_reg_value = 0x2B404040;
3343 uniqtranscale_reg_value = 0x5548B83A;
3344 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003346 demph_reg_value = 0x2B245555;
3347 uniqtranscale_reg_value = 0x5560B83A;
3348 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003350 demph_reg_value = 0x2B405555;
3351 uniqtranscale_reg_value = 0x5598DA3A;
3352 break;
3353 default:
3354 return 0;
3355 }
3356 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303357 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003358 preemph_reg_value = 0x0002000;
3359 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003361 demph_reg_value = 0x2B404040;
3362 uniqtranscale_reg_value = 0x5552B83A;
3363 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303364 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003365 demph_reg_value = 0x2B404848;
3366 uniqtranscale_reg_value = 0x5580B83A;
3367 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003369 demph_reg_value = 0x2B404040;
3370 uniqtranscale_reg_value = 0x55ADDA3A;
3371 break;
3372 default:
3373 return 0;
3374 }
3375 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303376 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003377 preemph_reg_value = 0x0000000;
3378 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003380 demph_reg_value = 0x2B305555;
3381 uniqtranscale_reg_value = 0x5570B83A;
3382 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003384 demph_reg_value = 0x2B2B4040;
3385 uniqtranscale_reg_value = 0x55ADDA3A;
3386 break;
3387 default:
3388 return 0;
3389 }
3390 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303391 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003392 preemph_reg_value = 0x0006000;
3393 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003395 demph_reg_value = 0x1B405555;
3396 uniqtranscale_reg_value = 0x55ADDA3A;
3397 break;
3398 default:
3399 return 0;
3400 }
3401 break;
3402 default:
3403 return 0;
3404 }
3405
Ville Syrjäläa5805162015-05-26 20:42:30 +03003406 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003407 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3408 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3409 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003410 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003411 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3412 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3413 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3414 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003415 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003416
3417 return 0;
3418}
3419
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003420static bool chv_need_uniq_trans_scale(uint8_t train_set)
3421{
3422 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3423 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3424}
3425
Daniel Vetter5829975c2015-04-16 11:36:52 +02003426static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003427{
3428 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3429 struct drm_i915_private *dev_priv = dev->dev_private;
3430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3431 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003432 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003433 uint8_t train_set = intel_dp->train_set[0];
3434 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003435 enum pipe pipe = intel_crtc->pipe;
3436 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003437
3438 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303439 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003440 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303441 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003442 deemph_reg_value = 128;
3443 margin_reg_value = 52;
3444 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003446 deemph_reg_value = 128;
3447 margin_reg_value = 77;
3448 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303449 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003450 deemph_reg_value = 128;
3451 margin_reg_value = 102;
3452 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003454 deemph_reg_value = 128;
3455 margin_reg_value = 154;
3456 /* FIXME extra to set for 1200 */
3457 break;
3458 default:
3459 return 0;
3460 }
3461 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303462 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003463 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003465 deemph_reg_value = 85;
3466 margin_reg_value = 78;
3467 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003469 deemph_reg_value = 85;
3470 margin_reg_value = 116;
3471 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303472 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003473 deemph_reg_value = 85;
3474 margin_reg_value = 154;
3475 break;
3476 default:
3477 return 0;
3478 }
3479 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303480 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003481 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003483 deemph_reg_value = 64;
3484 margin_reg_value = 104;
3485 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003487 deemph_reg_value = 64;
3488 margin_reg_value = 154;
3489 break;
3490 default:
3491 return 0;
3492 }
3493 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303494 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003495 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003497 deemph_reg_value = 43;
3498 margin_reg_value = 154;
3499 break;
3500 default:
3501 return 0;
3502 }
3503 break;
3504 default:
3505 return 0;
3506 }
3507
Ville Syrjäläa5805162015-05-26 20:42:30 +03003508 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003509
3510 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003511 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3512 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003513 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3514 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003515 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3516
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003517 if (intel_crtc->config->lane_count > 2) {
3518 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3519 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3520 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3521 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3522 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3523 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003524
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003525 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3526 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3527 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3528 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3529
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003530 if (intel_crtc->config->lane_count > 2) {
3531 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3532 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3533 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3534 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3535 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003536
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003537 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003538 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003539 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3540 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3541 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3542 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3543 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003544
3545 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003546 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003547 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003548
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003549 val &= ~DPIO_SWING_MARGIN000_MASK;
3550 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003551
3552 /*
3553 * Supposedly this value shouldn't matter when unique transition
3554 * scale is disabled, but in fact it does matter. Let's just
3555 * always program the same value and hope it's OK.
3556 */
3557 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3558 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3559
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003560 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3561 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003562
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003563 /*
3564	 * The documentation says to set bit 27 for ch0 and bit 26 for
3565	 * ch1, which might be a typo in the doc. For now, for this
3566	 * unique transition scale selection, set bit 27 for both ch0
3567	 * and ch1.
3568 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003569 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003570 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003571 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003572 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003573 else
3574 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3575 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003576 }
3577
3578 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003579 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3580 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3581 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3582
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003583 if (intel_crtc->config->lane_count > 2) {
3584 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3585 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3586 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3587 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003588
Ville Syrjäläa5805162015-05-26 20:42:30 +03003589 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003590
3591 return 0;
3592}
3593
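/* Gen4's DP voltage swing and pre-emphasis control */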
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003594static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003595gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003596{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003597 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003598
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003599 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003601 default:
3602 signal_levels |= DP_VOLTAGE_0_4;
3603 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303604 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003605 signal_levels |= DP_VOLTAGE_0_6;
3606 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303607 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003608 signal_levels |= DP_VOLTAGE_0_8;
3609 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003611 signal_levels |= DP_VOLTAGE_1_2;
3612 break;
3613 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003614 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303615 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003616 default:
3617 signal_levels |= DP_PRE_EMPHASIS_0;
3618 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303619 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003620 signal_levels |= DP_PRE_EMPHASIS_3_5;
3621 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303622 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003623 signal_levels |= DP_PRE_EMPHASIS_6;
3624 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303625 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003626 signal_levels |= DP_PRE_EMPHASIS_9_5;
3627 break;
3628 }
3629 return signal_levels;
3630}
3631
Zhenyu Wange3421a12010-04-08 09:43:27 +08003632/* Gen6's DP voltage swing and pre-emphasis control */
3633static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003634gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003635{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003636 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3637 DP_TRAIN_PRE_EMPHASIS_MASK);
3638 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303639 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003641 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003643 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003646 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3648 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003649 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3651 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003652 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003653 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003654		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3655			      "0x%x\n", signal_levels);
3656 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003657 }
3658}
3659
Keith Packard1a2eb462011-11-16 16:26:07 -08003660/* Gen7's DP voltage swing and pre-emphasis control */
3661static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003662gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003663{
3664 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3665 DP_TRAIN_PRE_EMPHASIS_MASK);
3666 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303667 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003668 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003670 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003672 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3673
Sonika Jindalbd600182014-08-08 16:23:41 +05303674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003675 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003677 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3678
Sonika Jindalbd600182014-08-08 16:23:41 +05303679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003680 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003682 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3683
3684 default:
3685		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3686			      "0x%x\n", signal_levels);
3687 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3688 }
3689}
3690
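/*
 * Translate the requested vswing/pre-emphasis in train_set[0] into the
 * platform specific signal level encoding, fold it into intel_dp->DP
 * under the appropriate mask and write the result to the port register.
 */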
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003691void
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003692intel_dp_set_signal_levels(struct intel_dp *intel_dp)
Paulo Zanonif0a34242012-12-06 16:51:50 -02003693{
3694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003695 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003696 struct drm_device *dev = intel_dig_port->base.base.dev;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003697 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehallf8896f52015-06-25 11:11:03 +03003698 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003699 uint8_t train_set = intel_dp->train_set[0];
3700
David Weinehallf8896f52015-06-25 11:11:03 +03003701 if (HAS_DDI(dev)) {
3702 signal_levels = ddi_signal_levels(intel_dp);
3703
3704 if (IS_BROXTON(dev))
3705 signal_levels = 0;
3706 else
3707 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003708 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003709 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003710 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003711 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003712 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003713 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003714 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003715 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003716 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003717 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3718 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003719 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003720 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3721 }
3722
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303723 if (mask)
3724 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3725
3726 DRM_DEBUG_KMS("Using vswing level %d\n",
3727 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3728 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3729 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3730 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003731
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003732 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003733
3734 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3735 POSTING_READ(intel_dp->output_reg);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003736}
3737
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003738void
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003739intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3740 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003741{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003742 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003743 struct drm_i915_private *dev_priv =
3744 to_i915(intel_dig_port->base.base.dev);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003745
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003746 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003747
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003748 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003749 POSTING_READ(intel_dp->output_reg);
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003750}
3751
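/*
 * Switch a DDI port to transmitting the idle pattern and, except on
 * PORT_A, wait for the hardware to report DP_TP_STATUS_IDLE_DONE.
 * No-op on non-DDI platforms.
 */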
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003752void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
Imre Deak3ab9c632013-05-03 12:57:41 +03003753{
3754 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3755 struct drm_device *dev = intel_dig_port->base.base.dev;
3756 struct drm_i915_private *dev_priv = dev->dev_private;
3757 enum port port = intel_dig_port->port;
3758 uint32_t val;
3759
3760 if (!HAS_DDI(dev))
3761 return;
3762
3763 val = I915_READ(DP_TP_CTL(port));
3764 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3765 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3766 I915_WRITE(DP_TP_CTL(port), val);
3767
3768 /*
3769	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3770	 * we need to set idle transmission mode is to work around a HW issue
3771	 * where we enable the pipe while not in idle link-training mode.
3772	 * In this case there is a requirement to wait for a minimum number of
3773	 * idle patterns to be sent.
3774 */
3775 if (port == PORT_A)
3776 return;
3777
3778 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3779 1))
3780 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3781}
3782
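/*
 * Bring the link down: switch the port to the idle training pattern,
 * then clear DP_PORT_EN and DP_AUDIO_OUTPUT_ENABLE. IBX additionally
 * needs the transcoder A dance described in the workaround comment
 * below so that the matching HDMI port can later be enabled.
 */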
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003783static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003784intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003785{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003786 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003787 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003788 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003789 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003790 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003791 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003792
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003793 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003794 return;
3795
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003796 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003797 return;
3798
Zhao Yakui28c97732009-10-09 11:39:41 +08003799 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003800
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003801 if ((IS_GEN7(dev) && port == PORT_A) ||
3802 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003803 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003804 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003805 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003806 if (IS_CHERRYVIEW(dev))
3807 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3808 else
3809 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003810 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003811 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003812 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003813 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003814
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003815 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3816 I915_WRITE(intel_dp->output_reg, DP);
3817 POSTING_READ(intel_dp->output_reg);
3818
3819 /*
3820	 * HW workaround for IBX: we need to move the port
3821 * to transcoder A after disabling it to allow the
3822 * matching HDMI port to be enabled on transcoder A.
3823 */
3824 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003825 /*
3826 * We get CPU/PCH FIFO underruns on the other pipe when
3827 * doing the workaround. Sweep them under the rug.
3828 */
3829 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3830 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3831
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003832 /* always enable with pattern 1 (as per spec) */
3833 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3834 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3835 I915_WRITE(intel_dp->output_reg, DP);
3836 POSTING_READ(intel_dp->output_reg);
3837
3838 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003839 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003840 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003841
3842 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3843 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3844 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
Eric Anholt5bddd172010-11-18 09:32:59 +08003845 }
3846
Keith Packardf01eca22011-09-28 16:48:10 -07003847 msleep(intel_dp->panel_power_down_delay);
Ville Syrjälä6fec7662015-11-10 16:16:17 +02003848
3849 intel_dp->DP = DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003850}
3851
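/*
 * Read the sink's base DPCD receiver capabilities plus the extras we
 * care about: PSR/PSR2 support, the eDP 1.4 supported link rate table
 * and, for branch devices, the downstream port info. Returns false if
 * the aux transfers fail, no DPCD is present, or the downstream port
 * status can't be read.
 */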
Keith Packard26d61aa2011-07-25 20:01:09 -07003852static bool
3853intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003854{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003855 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3856 struct drm_device *dev = dig_port->base.base.dev;
3857 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303858 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003859
Jani Nikula9d1a1032014-03-14 16:51:15 +02003860 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3861 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003862 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003863
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003864 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003865
Adam Jacksonedb39242012-09-18 10:58:49 -04003866 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3867 return false; /* DPCD not present */
3868
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003869 /* Check if the panel supports PSR */
3870 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003871 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003872 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3873 intel_dp->psr_dpcd,
3874 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003875 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3876 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003877 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003878 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303879
3880 if (INTEL_INFO(dev)->gen >= 9 &&
3881 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3882 uint8_t frame_sync_cap;
3883
3884 dev_priv->psr.sink_support = true;
3885 intel_dp_dpcd_read_wake(&intel_dp->aux,
3886 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3887 &frame_sync_cap, 1);
3888 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3889 /* PSR2 needs frame sync as well */
3890 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3891			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3892 dev_priv->psr.psr2_support ? "supported" : "not supported");
3893 }
Jani Nikula50003932013-09-20 16:42:17 +03003894 }
3895
Jani Nikulabc5133d2015-09-03 11:16:07 +03003896 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03003897 yesno(intel_dp_source_supports_hbr2(intel_dp)),
Jani Nikula742f4912015-09-03 11:16:09 +03003898 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07003899
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303900 /* Intermediate frequency support */
3901 if (is_edp(intel_dp) &&
3902 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3903 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3904	    (rev >= 0x03)) { /* eDP 1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003905 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003906 int i;
3907
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303908 intel_dp_dpcd_read_wake(&intel_dp->aux,
3909 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003910 sink_rates,
3911 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003912
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003913 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3914 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003915
3916 if (val == 0)
3917 break;
3918
Sonika Jindalaf77b972015-05-07 13:59:28 +05303919 /* Value read is in kHz while drm clock is saved in deca-kHz */
3920 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003921 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003922 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303923 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003924
3925 intel_dp_print_rates(intel_dp);
3926
Adam Jacksonedb39242012-09-18 10:58:49 -04003927 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3928 DP_DWN_STRM_PORT_PRESENT))
3929 return true; /* native DP sink */
3930
3931 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3932 return true; /* no per-port downstream info */
3933
Jani Nikula9d1a1032014-03-14 16:51:15 +02003934 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3935 intel_dp->downstream_ports,
3936 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003937 return false; /* downstream port status fetch failed */
3938
3939 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003940}
3941
Adam Jackson0d198322012-05-14 16:05:47 -04003942static void
3943intel_dp_probe_oui(struct intel_dp *intel_dp)
3944{
3945 u8 buf[3];
3946
3947 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3948 return;
3949
Jani Nikula9d1a1032014-03-14 16:51:15 +02003950 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003951 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3952 buf[0], buf[1], buf[2]);
3953
Jani Nikula9d1a1032014-03-14 16:51:15 +02003954 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003955 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3956 buf[0], buf[1], buf[2]);
3957}
3958
Dave Airlie0e32b392014-05-02 14:02:48 +10003959static bool
3960intel_dp_probe_mst(struct intel_dp *intel_dp)
3961{
3962 u8 buf[1];
3963
3964 if (!intel_dp->can_mst)
3965 return false;
3966
3967 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3968 return false;
3969
Dave Airlie0e32b392014-05-02 14:02:48 +10003970 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3971 if (buf[0] & DP_MST_CAP) {
3972 DRM_DEBUG_KMS("Sink is MST capable\n");
3973 intel_dp->is_mst = true;
3974 } else {
3975 DRM_DEBUG_KMS("Sink is not MST capable\n");
3976 intel_dp->is_mst = false;
3977 }
3978 }
Dave Airlie0e32b392014-05-02 14:02:48 +10003979
3980 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3981 return intel_dp->is_mst;
3982}
3983
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003984static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003985{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003986 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08003987 struct drm_device *dev = dig_port->base.base.dev;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003988 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003989 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003990 int ret = 0;
Rodrigo Vivic6297842015-11-05 10:50:20 -08003991 int count = 0;
3992 int attempts = 10;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003993
3994 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003995 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003996 ret = -EIO;
3997 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003998 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003999
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004000 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004001 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004002 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004003 ret = -EIO;
4004 goto out;
4005 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004006
Rodrigo Vivic6297842015-11-05 10:50:20 -08004007 do {
4008 intel_wait_for_vblank(dev, intel_crtc->pipe);
4009
4010 if (drm_dp_dpcd_readb(&intel_dp->aux,
4011 DP_TEST_SINK_MISC, &buf) < 0) {
4012 ret = -EIO;
4013 goto out;
4014 }
4015 count = buf & DP_TEST_COUNT_MASK;
4016 } while (--attempts && count);
4017
4018 if (attempts == 0) {
4019 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
4020 ret = -ETIMEDOUT;
4021 }
4022
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004023 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004024 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004025 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004026}
4027
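/*
 * Arm the sink CRC machinery: bail out if the sink doesn't advertise
 * DP_TEST_CRC_SUPPORTED, stop any test that is already running, then
 * disable IPS and set DP_TEST_SINK_START.
 */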
4028static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4029{
4030 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08004031 struct drm_device *dev = dig_port->base.base.dev;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004032 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4033 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004034 int ret;
4035
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004036 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4037 return -EIO;
4038
4039 if (!(buf & DP_TEST_CRC_SUPPORTED))
4040 return -ENOTTY;
4041
4042 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4043 return -EIO;
4044
Rodrigo Vivi6d8175d2015-11-05 10:50:22 -08004045 if (buf & DP_TEST_SINK_START) {
4046 ret = intel_dp_sink_crc_stop(intel_dp);
4047 if (ret)
4048 return ret;
4049 }
4050
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004051 hsw_disable_ips(intel_crtc);
4052
4053 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4054 buf | DP_TEST_SINK_START) < 0) {
4055 hsw_enable_ips(intel_crtc);
4056 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004057 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004058
Rodrigo Vivid72f9d92015-11-05 10:50:19 -08004059 intel_wait_for_vblank(dev, intel_crtc->pipe);
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004060 return 0;
4061}
4062
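/*
 * Ask the sink to compute CRCs of the frames it receives: start the
 * test sink, wait (up to 6 vblanks) for DP_TEST_COUNT to become
 * non-zero, then read the six CRC bytes from DP_TEST_CRC_R_CR.
 */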
4063int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4064{
4065 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4066 struct drm_device *dev = dig_port->base.base.dev;
4067 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4068 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004069 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004070 int attempts = 6;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004071
4072 ret = intel_dp_sink_crc_start(intel_dp);
4073 if (ret)
4074 return ret;
4075
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004076 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004077 intel_wait_for_vblank(dev, intel_crtc->pipe);
4078
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004079 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004080 DP_TEST_SINK_MISC, &buf) < 0) {
4081 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004082 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004083 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004084 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004085
Rodrigo Vivi7e38eef2015-11-05 10:50:21 -08004086 } while (--attempts && count == 0);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004087
4088 if (attempts == 0) {
Rodrigo Vivi7e38eef2015-11-05 10:50:21 -08004089 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4090 ret = -ETIMEDOUT;
4091 goto stop;
4092 }
4093
4094 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4095 ret = -EIO;
4096 goto stop;
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004097 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004098
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004099stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004100 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004101 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004102}
4103
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004104static bool
4105intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4106{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004107 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4108 DP_DEVICE_SERVICE_IRQ_VECTOR,
4109 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004110}
4111
Dave Airlie0e32b392014-05-02 14:02:48 +10004112static bool
4113intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4114{
4115 int ret;
4116
4117 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4118 DP_SINK_COUNT_ESI,
4119 sink_irq_vector, 14);
4120 if (ret != 14)
4121 return false;
4122
4123 return true;
4124}
4125
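/*
 * DP compliance (automated test request) handlers. Only the EDID read
 * test does real work here; the link training, video pattern and PHY
 * pattern handlers currently just return a canned ACK/NAK.
 */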
Todd Previtec5d5ab72015-04-15 08:38:38 -07004126static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004127{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004128 uint8_t test_result = DP_TEST_ACK;
4129 return test_result;
4130}
4131
4132static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4133{
4134 uint8_t test_result = DP_TEST_NAK;
4135 return test_result;
4136}
4137
4138static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4139{
4140 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004141 struct intel_connector *intel_connector = intel_dp->attached_connector;
4142 struct drm_connector *connector = &intel_connector->base;
4143
4144 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004145 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004146 intel_dp->aux.i2c_defer_count > 6) {
4147 /* Check EDID read for NACKs, DEFERs and corruption
4148 * (DP CTS 1.2 Core r1.1)
4149 * 4.2.2.4 : Failed EDID read, I2C_NAK
4150 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4151 * 4.2.2.6 : EDID corruption detected
4152 * Use failsafe mode for all cases
4153 */
4154 if (intel_dp->aux.i2c_nack_count > 0 ||
4155 intel_dp->aux.i2c_defer_count > 0)
4156 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4157 intel_dp->aux.i2c_nack_count,
4158 intel_dp->aux.i2c_defer_count);
4159 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4160 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304161 struct edid *block = intel_connector->detect_edid;
4162
4163 /* We have to write the checksum
4164 * of the last block read
4165 */
4166 block += intel_connector->detect_edid->extensions;
4167
Todd Previte559be302015-05-04 07:48:20 -07004168 if (!drm_dp_dpcd_write(&intel_dp->aux,
4169 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304170 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004171 1))
Todd Previte559be302015-05-04 07:48:20 -07004172 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4173
4174 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4175 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4176 }
4177
4178 /* Set test active flag here so userspace doesn't interrupt things */
4179 intel_dp->compliance_test_active = 1;
4180
Todd Previtec5d5ab72015-04-15 08:38:38 -07004181 return test_result;
4182}
4183
4184static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4185{
4186 uint8_t test_result = DP_TEST_NAK;
4187 return test_result;
4188}
4189
4190static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4191{
4192 uint8_t response = DP_TEST_NAK;
4193 uint8_t rxdata = 0;
4194 int status = 0;
4195
Todd Previtec5d5ab72015-04-15 08:38:38 -07004196 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4197 if (status <= 0) {
4198 DRM_DEBUG_KMS("Could not read test request from sink\n");
4199 goto update_status;
4200 }
4201
4202 switch (rxdata) {
4203 case DP_TEST_LINK_TRAINING:
4204 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4205 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4206 response = intel_dp_autotest_link_training(intel_dp);
4207 break;
4208 case DP_TEST_LINK_VIDEO_PATTERN:
4209 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4211 response = intel_dp_autotest_video_pattern(intel_dp);
4212 break;
4213 case DP_TEST_LINK_EDID_READ:
4214 DRM_DEBUG_KMS("EDID test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4216 response = intel_dp_autotest_edid(intel_dp);
4217 break;
4218 case DP_TEST_LINK_PHY_TEST_PATTERN:
4219 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4221 response = intel_dp_autotest_phy_pattern(intel_dp);
4222 break;
4223 default:
4224 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4225 break;
4226 }
4227
4228update_status:
4229 status = drm_dp_dpcd_write(&intel_dp->aux,
4230 DP_TEST_RESPONSE,
4231 &response, 1);
4232 if (status <= 0)
4233 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004234}
4235
Dave Airlie0e32b392014-05-02 14:02:48 +10004236static int
4237intel_dp_check_mst_status(struct intel_dp *intel_dp)
4238{
4239 bool bret;
4240
4241 if (intel_dp->is_mst) {
4242 u8 esi[16] = { 0 };
4243 int ret = 0;
4244 int retry;
4245 bool handled;
4246 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4247go_again:
4248 if (bret == true) {
4249
4250 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004251 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004252 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004253 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4254 intel_dp_start_link_train(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004255 intel_dp_stop_link_train(intel_dp);
4256 }
4257
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004258 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004259 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4260
4261 if (handled) {
4262 for (retry = 0; retry < 3; retry++) {
4263 int wret;
4264 wret = drm_dp_dpcd_write(&intel_dp->aux,
4265 DP_SINK_COUNT_ESI+1,
4266 &esi[1], 3);
4267 if (wret == 3) {
4268 break;
4269 }
4270 }
4271
4272 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4273 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004274 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004275 goto go_again;
4276 }
4277 } else
4278 ret = 0;
4279
4280 return ret;
4281 } else {
4282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4283 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4284 intel_dp->is_mst = false;
4285 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4286 /* send a hotplug event */
4287 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4288 }
4289 }
4290 return -EINVAL;
4291}
4292
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004293/*
4294 * According to DP spec
4295 * 5.1.2:
4296 * 1. Read DPCD
4297 * 2. Configure link according to Receiver Capabilities
4298 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4299 * 4. Check link status on receipt of hot-plug interrupt
4300 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004301static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004302intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004303{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004304 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004305 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004306 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004307 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004308
Dave Airlie5b215bc2014-08-05 10:40:20 +10004309 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4310
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304311 /*
4312 * Clearing compliance test variables to allow capturing
4313 * of values for next automated test request.
4314 */
4315 intel_dp->compliance_test_active = 0;
4316 intel_dp->compliance_test_type = 0;
4317 intel_dp->compliance_test_data = 0;
4318
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004319 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004320 return;
4321
Imre Deak1a125d82014-08-18 14:42:46 +03004322 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4323 return;
4324
Keith Packard92fd8fd2011-07-25 19:50:10 -07004325 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004326 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004327 return;
4328 }
4329
Keith Packard92fd8fd2011-07-25 19:50:10 -07004330 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004331 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004332 return;
4333 }
4334
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004335 /* Try to read the source of the interrupt */
4336 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4337 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4338 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004339 drm_dp_dpcd_writeb(&intel_dp->aux,
4340 DP_DEVICE_SERVICE_IRQ_VECTOR,
4341 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004342
4343 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004344 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004345 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4346 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4347 }
4348
Shubhangi Shrivastava14631e92015-10-14 14:56:49 +05304349	/* if link training is requested we should always perform it */
4350 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4351 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004352 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004353 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004354 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004355 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004356 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004357}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004358
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004359/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004360static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004361intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004362{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004363 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004364 uint8_t type;
4365
4366 if (!intel_dp_get_dpcd(intel_dp))
4367 return connector_status_disconnected;
4368
4369 /* if there's no downstream port, we're done */
4370 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004371 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004372
4373 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004374 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4375 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004376 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004377
4378 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4379 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004380 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004381
Adam Jackson23235172012-09-20 16:42:45 -04004382 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4383 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004384 }
4385
4386 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004387 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004388 return connector_status_connected;
4389
4390 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004391 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4392 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4393 if (type == DP_DS_PORT_TYPE_VGA ||
4394 type == DP_DS_PORT_TYPE_NON_EDID)
4395 return connector_status_unknown;
4396 } else {
4397 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4398 DP_DWN_STRM_PORT_TYPE_MASK;
4399 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4400 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4401 return connector_status_unknown;
4402 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004403
4404 /* Anything else is out of spec, warn and ignore */
4405 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004406 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004407}
4408
4409static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004410edp_detect(struct intel_dp *intel_dp)
4411{
4412 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4413 enum drm_connector_status status;
4414
4415 status = intel_panel_detect(dev);
4416 if (status == connector_status_unknown)
4417 status = connector_status_connected;
4418
4419 return status;
4420}
4421
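/*
 * Hot-plug live status helpers: each platform variant checks the ISR
 * bit corresponding to the given digital port (SDEISR on PCH
 * platforms, PORT_HOTPLUG_STAT on g4x/vlv, GEN8_DE_PORT_ISR on BXT).
 */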
Jani Nikulab93433c2015-08-20 10:47:36 +03004422static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4423 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004424{
Jani Nikulab93433c2015-08-20 10:47:36 +03004425 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004426
Jani Nikula0df53b72015-08-20 10:47:40 +03004427 switch (port->port) {
4428 case PORT_A:
4429 return true;
4430 case PORT_B:
4431 bit = SDE_PORTB_HOTPLUG;
4432 break;
4433 case PORT_C:
4434 bit = SDE_PORTC_HOTPLUG;
4435 break;
4436 case PORT_D:
4437 bit = SDE_PORTD_HOTPLUG;
4438 break;
4439 default:
4440 MISSING_CASE(port->port);
4441 return false;
4442 }
4443
4444 return I915_READ(SDEISR) & bit;
4445}
4446
4447static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4448 struct intel_digital_port *port)
4449{
4450 u32 bit;
4451
4452 switch (port->port) {
4453 case PORT_A:
4454 return true;
4455 case PORT_B:
4456 bit = SDE_PORTB_HOTPLUG_CPT;
4457 break;
4458 case PORT_C:
4459 bit = SDE_PORTC_HOTPLUG_CPT;
4460 break;
4461 case PORT_D:
4462 bit = SDE_PORTD_HOTPLUG_CPT;
4463 break;
Jani Nikulaa78695d2015-09-18 15:54:50 +03004464 case PORT_E:
4465 bit = SDE_PORTE_HOTPLUG_SPT;
4466 break;
Jani Nikula0df53b72015-08-20 10:47:40 +03004467 default:
4468 MISSING_CASE(port->port);
4469 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004470 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004471
Jani Nikulab93433c2015-08-20 10:47:36 +03004472 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004473}
4474
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004475static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004476 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004477{
Jani Nikula9642c812015-08-20 10:47:41 +03004478 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004479
Jani Nikula9642c812015-08-20 10:47:41 +03004480 switch (port->port) {
4481 case PORT_B:
4482 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4483 break;
4484 case PORT_C:
4485 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4486 break;
4487 case PORT_D:
4488 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4489 break;
4490 default:
4491 MISSING_CASE(port->port);
4492 return false;
4493 }
4494
4495 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4496}
4497
4498static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4499 struct intel_digital_port *port)
4500{
4501 u32 bit;
4502
4503 switch (port->port) {
4504 case PORT_B:
4505 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4506 break;
4507 case PORT_C:
4508 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4509 break;
4510 case PORT_D:
4511 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4512 break;
4513 default:
4514 MISSING_CASE(port->port);
4515 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004516 }
4517
Jani Nikula1d245982015-08-20 10:47:37 +03004518 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004519}
4520
Jani Nikulae464bfd2015-08-20 10:47:42 +03004521static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304522 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004523{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304524 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4525 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004526 u32 bit;
4527
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304528 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4529 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004530 case PORT_A:
4531 bit = BXT_DE_PORT_HP_DDIA;
4532 break;
4533 case PORT_B:
4534 bit = BXT_DE_PORT_HP_DDIB;
4535 break;
4536 case PORT_C:
4537 bit = BXT_DE_PORT_HP_DDIC;
4538 break;
4539 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304540 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004541 return false;
4542 }
4543
4544 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4545}
4546
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004547/**
4548 * intel_digital_port_connected - is the specified port connected?
4549 * @dev_priv: i915 private structure
4550 * @port: the port to test
4551 *
4552 * Return %true if @port is connected, %false otherwise.
4553 */
Sonika Jindal237ed862015-09-15 09:44:20 +05304554bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004555 struct intel_digital_port *port)
4556{
Jani Nikula0df53b72015-08-20 10:47:40 +03004557 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004558 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004559 if (HAS_PCH_SPLIT(dev_priv))
4560 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004561 else if (IS_BROXTON(dev_priv))
4562 return bxt_digital_port_connected(dev_priv, port);
Wayne Boyer666a4532015-12-09 12:29:35 -08004563 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Jani Nikula9642c812015-08-20 10:47:41 +03004564 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004565 else
4566 return g4x_digital_port_connected(dev_priv, port);
4567}
4568
Keith Packard8c241fe2011-09-28 16:38:44 -07004569static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004570intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004571{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004572 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004573
Jani Nikula9cd300e2012-10-19 14:51:52 +03004574 /* use cached edid if we have one */
4575 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004576 /* invalid edid */
4577 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004578 return NULL;
4579
Jani Nikula55e9ede2013-10-01 10:38:54 +03004580 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004581 } else
4582 return drm_get_edid(&intel_connector->base,
4583 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004584}
4585
Chris Wilsonbeb60602014-09-02 20:04:00 +01004586static void
4587intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004588{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004589 struct intel_connector *intel_connector = intel_dp->attached_connector;
4590 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004591
Chris Wilsonbeb60602014-09-02 20:04:00 +01004592 edid = intel_dp_get_edid(intel_dp);
4593 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004594
Chris Wilsonbeb60602014-09-02 20:04:00 +01004595 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4596 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4597 else
4598 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4599}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004600
Chris Wilsonbeb60602014-09-02 20:04:00 +01004601static void
4602intel_dp_unset_edid(struct intel_dp *intel_dp)
4603{
4604 struct intel_connector *intel_connector = intel_dp->attached_connector;
4605
4606 kfree(intel_connector->detect_edid);
4607 intel_connector->detect_edid = NULL;
4608
4609 intel_dp->has_audio = false;
4610}
4611
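/*
 * Full detect path: check panel/live status, re-read the DPCD, probe
 * the sink OUI and MST capability, cache the EDID and service any
 * pending automated test or sink specific interrupts.
 */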
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004612static enum drm_connector_status
4613intel_dp_detect(struct drm_connector *connector, bool force)
4614{
4615 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004616 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4617 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004618 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004619 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004620 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004621 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004622 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004623
Chris Wilson164c8592013-07-20 20:27:08 +01004624 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004625 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004626 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004627
Dave Airlie0e32b392014-05-02 14:02:48 +10004628 if (intel_dp->is_mst) {
4629 /* MST devices are disconnected from a monitor POV */
4630 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4631 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004632 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004633 }
4634
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004635 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4636 intel_display_power_get(to_i915(dev), power_domain);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004637
Chris Wilsond410b562014-09-02 20:03:59 +01004638 /* Can't disconnect eDP, but you can close the lid... */
4639 if (is_edp(intel_dp))
4640 status = edp_detect(intel_dp);
Ander Conselvan de Oliveirac555a812015-11-18 17:19:30 +02004641 else if (intel_digital_port_connected(to_i915(dev),
4642 dp_to_dig_port(intel_dp)))
4643 status = intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004644 else
Ander Conselvan de Oliveirac555a812015-11-18 17:19:30 +02004645 status = connector_status_disconnected;
4646
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304647 if (status != connector_status_connected) {
4648 intel_dp->compliance_test_active = 0;
4649 intel_dp->compliance_test_type = 0;
4650 intel_dp->compliance_test_data = 0;
4651
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004652 goto out;
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304653 }
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004654
Adam Jackson0d198322012-05-14 16:05:47 -04004655 intel_dp_probe_oui(intel_dp);
4656
Dave Airlie0e32b392014-05-02 14:02:48 +10004657 ret = intel_dp_probe_mst(intel_dp);
4658 if (ret) {
4659 /* if we are in MST mode then this connector
4660 won't appear connected or have anything with EDID on it */
4661 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4662 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4663 status = connector_status_disconnected;
4664 goto out;
4665 }
4666
Shubhangi Shrivastava4df69602015-10-28 15:30:36 +05304667 /*
4668	 * Clear the NACK and defer counts so that the values recorded
4669	 * while reading the EDID are exact, as required by DP compliance
4670	 * tests 4.2.2.4 and 4.2.2.5.
4671 */
4672 intel_dp->aux.i2c_nack_count = 0;
4673 intel_dp->aux.i2c_defer_count = 0;
4674
Chris Wilsonbeb60602014-09-02 20:04:00 +01004675 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004676
Paulo Zanonid63885d2012-10-26 19:05:49 -02004677 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4678 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004679 status = connector_status_connected;
4680
Todd Previte09b1eb12015-04-20 15:27:34 -07004681 /* Try to read the source of the interrupt */
4682 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4683 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4684 /* Clear interrupt source */
4685 drm_dp_dpcd_writeb(&intel_dp->aux,
4686 DP_DEVICE_SERVICE_IRQ_VECTOR,
4687 sink_irq_vector);
4688
4689 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4690 intel_dp_handle_test_request(intel_dp);
4691 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4692 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4693 }
4694
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004695out:
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004696 intel_display_power_put(to_i915(dev), power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004697 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004698}
4699
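/*
 * Forced probe from userspace: refresh the cached EDID for a connector
 * that is already considered connected.
 */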
Chris Wilsonbeb60602014-09-02 20:04:00 +01004700static void
4701intel_dp_force(struct drm_connector *connector)
4702{
4703 struct intel_dp *intel_dp = intel_attached_dp(connector);
4704 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004705 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004706 enum intel_display_power_domain power_domain;
4707
4708 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4709 connector->base.id, connector->name);
4710 intel_dp_unset_edid(intel_dp);
4711
4712 if (connector->status != connector_status_connected)
4713 return;
4714
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004715 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4716 intel_display_power_get(dev_priv, power_domain);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004717
4718 intel_dp_set_edid(intel_dp);
4719
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004720 intel_display_power_put(dev_priv, power_domain);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004721
4722 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4723 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4724}
4725
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004726static int intel_dp_get_modes(struct drm_connector *connector)
4727{
Jani Nikuladd06f902012-10-19 14:51:50 +03004728 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004729 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004730
Chris Wilsonbeb60602014-09-02 20:04:00 +01004731 edid = intel_connector->detect_edid;
4732 if (edid) {
4733 int ret = intel_connector_update_modes(connector, edid);
4734 if (ret)
4735 return ret;
4736 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004737
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004738 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004739 if (is_edp(intel_attached_dp(connector)) &&
4740 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004741 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004742
4743 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004744 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004745 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004746 drm_mode_probed_add(connector, mode);
4747 return 1;
4748 }
4749 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004750
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004751 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004752}
4753
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004754static bool
4755intel_dp_detect_audio(struct drm_connector *connector)
4756{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004757 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004758 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004759
Chris Wilsonbeb60602014-09-02 20:04:00 +01004760 edid = to_intel_connector(connector)->detect_edid;
4761 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004762 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004763
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004764 return has_audio;
4765}
4766
Chris Wilsonf6849602010-09-19 09:29:33 +01004767static int
4768intel_dp_set_property(struct drm_connector *connector,
4769 struct drm_property *property,
4770 uint64_t val)
4771{
Chris Wilsone953fd72011-02-21 22:23:52 +00004772 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004773 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004774 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4775 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004776 int ret;
4777
Rob Clark662595d2012-10-11 20:36:04 -05004778 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004779 if (ret)
4780 return ret;
4781
Chris Wilson3f43c482011-05-12 22:17:24 +01004782 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004783 int i = val;
4784 bool has_audio;
4785
4786 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004787 return 0;
4788
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004789 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004790
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004791 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004792 has_audio = intel_dp_detect_audio(connector);
4793 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004794 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004795
4796 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004797 return 0;
4798
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004799 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004800 goto done;
4801 }
4802
Chris Wilsone953fd72011-02-21 22:23:52 +00004803 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004804 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004805 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004806
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004807 switch (val) {
4808 case INTEL_BROADCAST_RGB_AUTO:
4809 intel_dp->color_range_auto = true;
4810 break;
4811 case INTEL_BROADCAST_RGB_FULL:
4812 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004813 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004814 break;
4815 case INTEL_BROADCAST_RGB_LIMITED:
4816 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004817 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004818 break;
4819 default:
4820 return -EINVAL;
4821 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004822
4823 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004824 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004825 return 0;
4826
Chris Wilsone953fd72011-02-21 22:23:52 +00004827 goto done;
4828 }
4829
Yuly Novikov53b41832012-10-26 12:04:00 +03004830 if (is_edp(intel_dp) &&
4831 property == connector->dev->mode_config.scaling_mode_property) {
4832 if (val == DRM_MODE_SCALE_NONE) {
4833 DRM_DEBUG_KMS("no scaling not supported\n");
4834 return -EINVAL;
4835 }
4836
4837 if (intel_connector->panel.fitting_mode == val) {
4838 /* the eDP scaling property is not changed */
4839 return 0;
4840 }
4841 intel_connector->panel.fitting_mode = val;
4842
4843 goto done;
4844 }
4845
Chris Wilsonf6849602010-09-19 09:29:33 +01004846 return -EINVAL;
4847
4848done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004849 if (intel_encoder->base.crtc)
4850 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004851
4852 return 0;
4853}
4854
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004855static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004856intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004857{
Jani Nikula1d508702012-10-19 14:51:49 +03004858 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004859
Chris Wilson10e972d2014-09-04 21:43:45 +01004860 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004861
Jani Nikula9cd300e2012-10-19 14:51:52 +03004862 if (!IS_ERR_OR_NULL(intel_connector->edid))
4863 kfree(intel_connector->edid);
4864
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004865 /* Can't call is_edp() since the encoder may have been destroyed
4866 * already. */
4867 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004868 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004869
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004870 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004871 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004872}
4873
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004874void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004875{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004876 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4877 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004878
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02004879 intel_dp_aux_fini(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004880 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004881 if (is_edp(intel_dp)) {
4882 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004883 /*
4884	 * vdd might still be enabled due to the delayed vdd off.
4885 * Make sure vdd is actually turned off here.
4886 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004887 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004888 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004889 pps_unlock(intel_dp);
4890
Clint Taylor01527b32014-07-07 13:01:46 -07004891 if (intel_dp->edp_notifier.notifier_call) {
4892 unregister_reboot_notifier(&intel_dp->edp_notifier);
4893 intel_dp->edp_notifier.notifier_call = NULL;
4894 }
Keith Packardbd943152011-09-18 23:09:52 -07004895 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004896 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004897 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004898}
4899
Imre Deak07f9cd02014-08-18 14:42:45 +03004900static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4901{
4902 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4903
4904 if (!is_edp(intel_dp))
4905 return;
4906
Ville Syrjälä951468f2014-09-04 14:55:31 +03004907 /*
4908	 * vdd might still be enabled due to the delayed vdd off.
4909 * Make sure vdd is actually turned off here.
4910 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004911 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004912 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004913 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004914 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004915}
4916
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004917static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4918{
4919 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4920 struct drm_device *dev = intel_dig_port->base.base.dev;
4921 struct drm_i915_private *dev_priv = dev->dev_private;
4922 enum intel_display_power_domain power_domain;
4923
4924 lockdep_assert_held(&dev_priv->pps_mutex);
4925
4926 if (!edp_have_panel_vdd(intel_dp))
4927 return;
4928
4929 /*
4930 * The VDD bit needs a power domain reference, so if the bit is
4931 * already enabled when we boot or resume, grab this reference and
4932 * schedule a vdd off, so we don't hold on to the reference
4933 * indefinitely.
4934 */
4935 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
Ville Syrjälä25f78f52015-11-16 15:01:04 +01004936 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004937 intel_display_power_get(dev_priv, power_domain);
4938
4939 edp_panel_vdd_schedule_off(intel_dp);
4940}
4941
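/*
 * Encoder reset: re-read the power sequencer assignment left behind by
 * the BIOS (on VLV/CHV) and re-sync our VDD state tracking, e.g. across
 * boot or resume.
 */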
Imre Deak6d93c0c2014-07-31 14:03:36 +03004942static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4943{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004944 struct intel_dp *intel_dp;
4945
4946 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4947 return;
4948
4949 intel_dp = enc_to_intel_dp(encoder);
4950
4951 pps_lock(intel_dp);
4952
4953 /*
4954 * Read out the current power sequencer assignment,
4955 * in case the BIOS did something with it.
4956 */
Wayne Boyer666a4532015-12-09 12:29:35 -08004957 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004958 vlv_initial_power_sequencer_setup(intel_dp);
4959
4960 intel_edp_panel_vdd_sanitize(intel_dp);
4961
4962 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004963}
4964
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004965static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004966 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004967 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004968 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004969 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004970 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004971 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004972 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004973 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004974 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004975};
4976
4977static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4978 .get_modes = intel_dp_get_modes,
4979 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004980 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004981};
4982
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004983static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004984 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004985 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004986};
4987
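/*
 * Hotplug pulse handler for a DP port. Long pulses mean the sink may have
 * been (un)plugged, so the DPCD is re-read and MST mode is entered or torn
 * down as needed; short pulses signal sink events (link status, MST
 * messages) and are handled without a full reprobe.
 */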
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004988enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004989intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4990{
4991 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004992 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004993 struct drm_device *dev = intel_dig_port->base.base.dev;
4994 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004995 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004996 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004997
Dave Airlie0e32b392014-05-02 14:02:48 +10004998 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4999 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10005000
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005001 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5002 /*
5003 * vdd off can generate a long pulse on eDP which
5004 * would require vdd on to handle it, and thus we
5005 * would end up in an endless cycle of
5006 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5007 */
5008 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5009 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02005010 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005011 }
5012
Ville Syrjälä26fbb772014-08-11 18:37:37 +03005013 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5014 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10005015 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10005016
Ville Syrjälä25f78f52015-11-16 15:01:04 +01005017 power_domain = intel_display_port_aux_power_domain(intel_encoder);
Imre Deak1c767b32014-08-18 14:42:42 +03005018 intel_display_power_get(dev_priv, power_domain);
5019
Dave Airlie0e32b392014-05-02 14:02:48 +10005020 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03005021 /* indicate that we need to restart link training */
5022 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10005023
Jani Nikula7e66bcf2015-08-20 10:47:39 +03005024 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5025 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10005026
5027 if (!intel_dp_get_dpcd(intel_dp)) {
5028 goto mst_fail;
5029 }
5030
5031 intel_dp_probe_oui(intel_dp);
5032
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005033 if (!intel_dp_probe_mst(intel_dp)) {
5034 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5035 intel_dp_check_link_status(intel_dp);
5036 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005037 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005038 }
Dave Airlie0e32b392014-05-02 14:02:48 +10005039 } else {
5040 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03005041 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10005042 goto mst_fail;
5043 }
5044
5045 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10005046 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10005047 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10005048 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005049 }
5050 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005051
5052 ret = IRQ_HANDLED;
5053
Imre Deak1c767b32014-08-18 14:42:42 +03005054 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10005055mst_fail:
5056	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5057 if (intel_dp->is_mst) {
5058 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5059 intel_dp->is_mst = false;
5060 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5061 }
Imre Deak1c767b32014-08-18 14:42:42 +03005062put_power:
5063 intel_display_power_put(dev_priv, power_domain);
5064
5065 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10005066}
5067
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005068/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005069bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005070{
5071 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005072 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005073 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005074 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005075 [PORT_B] = DVO_PORT_DPB,
5076 [PORT_C] = DVO_PORT_DPC,
5077 [PORT_D] = DVO_PORT_DPD,
5078 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005079 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005080
Ville Syrjälä53ce81a2015-09-11 21:04:38 +03005081 /*
5082	 * eDP is not supported on g4x, so bail out early just
5083	 * for a bit of extra safety in case the VBT is bonkers.
5084 */
5085 if (INTEL_INFO(dev)->gen < 5)
5086 return false;
5087
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005088 if (port == PORT_A)
5089 return true;
5090
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005091 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005092 return false;
5093
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005094 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5095 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005096
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005097 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005098 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5099 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005100 return true;
5101 }
5102 return false;
5103}
5104
Dave Airlie0e32b392014-05-02 14:02:48 +10005105void
Chris Wilsonf6849602010-09-19 09:29:33 +01005106intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5107{
Yuly Novikov53b41832012-10-26 12:04:00 +03005108 struct intel_connector *intel_connector = to_intel_connector(connector);
5109
Chris Wilson3f43c482011-05-12 22:17:24 +01005110 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005111 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005112 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005113
5114 if (is_edp(intel_dp)) {
5115 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005116 drm_object_attach_property(
5117 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005118 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005119 DRM_MODE_SCALE_ASPECT);
5120 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005121 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005122}
5123
Imre Deakdada1a92014-01-29 13:25:41 +02005124static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5125{
5126 intel_dp->last_power_cycle = jiffies;
5127 intel_dp->last_power_on = jiffies;
5128 intel_dp->last_backlight_off = jiffies;
5129}
5130
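/*
 * Work out the eDP panel power sequencing delays: take the maximum of
 * the values currently programmed in the PPS registers and the VBT,
 * falling back to the eDP spec limits when both are unset.
 */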
Daniel Vetter67a54562012-10-20 20:57:45 +02005131static void
5132intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005133 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005134{
5135 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005136 struct edp_power_seq cur, vbt, spec,
5137 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305138 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005139 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
Jesse Barnes453c5422013-03-28 09:55:41 -07005140
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005141 lockdep_assert_held(&dev_priv->pps_mutex);
5142
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005143 /* already initialized? */
5144 if (final->t11_t12 != 0)
5145 return;
5146
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305147 if (IS_BROXTON(dev)) {
5148 /*
5149 * TODO: BXT has 2 sets of PPS registers.
5150		 * The correct register set for Broxton needs to be identified
5151		 * using the VBT; hardcoded to the first set for now.
5152 */
5153 pp_ctrl_reg = BXT_PP_CONTROL(0);
5154 pp_on_reg = BXT_PP_ON_DELAYS(0);
5155 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5156 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005157 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005158 pp_on_reg = PCH_PP_ON_DELAYS;
5159 pp_off_reg = PCH_PP_OFF_DELAYS;
5160 pp_div_reg = PCH_PP_DIVISOR;
5161 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005162 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5163
5164 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5165 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5166 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5167 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005168 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005169
5170 /* Workaround: Need to write PP_CONTROL with the unlock key as
5171 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305172 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005173
Jesse Barnes453c5422013-03-28 09:55:41 -07005174 pp_on = I915_READ(pp_on_reg);
5175 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305176 if (!IS_BROXTON(dev)) {
5177 I915_WRITE(pp_ctrl_reg, pp_ctl);
5178 pp_div = I915_READ(pp_div_reg);
5179 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005180
5181 /* Pull timing values out of registers */
5182 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5183 PANEL_POWER_UP_DELAY_SHIFT;
5184
5185 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5186 PANEL_LIGHT_ON_DELAY_SHIFT;
5187
5188 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5189 PANEL_LIGHT_OFF_DELAY_SHIFT;
5190
5191 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5192 PANEL_POWER_DOWN_DELAY_SHIFT;
5193
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305194 if (IS_BROXTON(dev)) {
5195 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5196 BXT_POWER_CYCLE_DELAY_SHIFT;
5197 if (tmp > 0)
5198 cur.t11_t12 = (tmp - 1) * 1000;
5199 else
5200 cur.t11_t12 = 0;
5201 } else {
5202 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005203 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305204 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005205
5206 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5207 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5208
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005209 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005210
5211 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5212 * our hw here, which are all in 100usec. */
5213 spec.t1_t3 = 210 * 10;
5214 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5215 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5216 spec.t10 = 500 * 10;
5217 /* This one is special and actually in units of 100ms, but zero
5218 * based in the hw (so we need to add 100 ms). But the sw vbt
5219	 * table multiplies it by 1000 to make it in units of 100usec,
5220 * too. */
5221 spec.t11_t12 = (510 + 100) * 10;
5222
5223 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5224 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5225
5226 /* Use the max of the register settings and vbt. If both are
5227 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005228#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005229 spec.field : \
5230 max(cur.field, vbt.field))
5231 assign_final(t1_t3);
5232 assign_final(t8);
5233 assign_final(t9);
5234 assign_final(t10);
5235 assign_final(t11_t12);
5236#undef assign_final
5237
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005238#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005239 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5240 intel_dp->backlight_on_delay = get_delay(t8);
5241 intel_dp->backlight_off_delay = get_delay(t9);
5242 intel_dp->panel_power_down_delay = get_delay(t10);
5243 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5244#undef get_delay
5245
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005246 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5247 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5248 intel_dp->panel_power_cycle_delay);
5249
5250 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5251 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005252}
5253
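/*
 * Program the computed panel power sequencing delays into the PPS
 * registers, and select the port driving the panel where the hardware
 * still has a port select field.
 */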
5254static void
5255intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005256 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005257{
5258 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005259 u32 pp_on, pp_off, pp_div, port_sel = 0;
5260 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005261 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005262 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005263 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005264
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005265 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005266
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305267 if (IS_BROXTON(dev)) {
5268 /*
5269 * TODO: BXT has 2 sets of PPS registers.
5270		 * The correct register set for Broxton needs to be identified
5271		 * using the VBT; hardcoded to the first set for now.
5272 */
5273 pp_ctrl_reg = BXT_PP_CONTROL(0);
5274 pp_on_reg = BXT_PP_ON_DELAYS(0);
5275 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5276
5277 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005278 pp_on_reg = PCH_PP_ON_DELAYS;
5279 pp_off_reg = PCH_PP_OFF_DELAYS;
5280 pp_div_reg = PCH_PP_DIVISOR;
5281 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005282 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5283
5284 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5285 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5286 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005287 }
5288
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005289 /*
5290 * And finally store the new values in the power sequencer. The
5291 * backlight delays are set to 1 because we do manual waits on them. For
5292 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5293 * we'll end up waiting for the backlight off delay twice: once when we
5294 * do the manual sleep, and once when we disable the panel and wait for
5295 * the PP_STATUS bit to become zero.
5296 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005297 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005298 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5299 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005300 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005301 /* Compute the divisor for the pp clock, simply match the Bspec
5302 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305303 if (IS_BROXTON(dev)) {
5304 pp_div = I915_READ(pp_ctrl_reg);
5305 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5306 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5307 << BXT_POWER_CYCLE_DELAY_SHIFT);
5308 } else {
5309 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5310 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5311 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5312 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005313
5314 /* Haswell doesn't have any port selection bits for the panel
5315 * power sequencer any more. */
Wayne Boyer666a4532015-12-09 12:29:35 -08005316 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005317 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005318 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005319 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005320 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005321 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005322 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005323 }
5324
Jesse Barnes453c5422013-03-28 09:55:41 -07005325 pp_on |= port_sel;
5326
5327 I915_WRITE(pp_on_reg, pp_on);
5328 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305329 if (IS_BROXTON(dev))
5330 I915_WRITE(pp_ctrl_reg, pp_div);
5331 else
5332 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005333
Daniel Vetter67a54562012-10-20 20:57:45 +02005334 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005335 I915_READ(pp_on_reg),
5336 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305337 IS_BROXTON(dev) ?
5338 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005339 I915_READ(pp_div_reg));
Zhenyu Wange3421a12010-04-08 09:43:27 +08005340}
5341
Vandana Kannanb33a2812015-02-13 15:33:03 +05305342/**
5343 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5344 * @dev: DRM device
5345 * @refresh_rate: RR to be programmed
5346 *
5347 * This function gets called when refresh rate (RR) has to be changed from
5348 * one frequency to another. Switches can be between high and low RR
5349 * supported by the panel or to any other RR based on media playback (in
5350 * this case, RR value needs to be passed from user space).
5351 *
5352 * The caller of this function needs to take a lock on dev_priv->drrs.
5353 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305354static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305355{
5356 struct drm_i915_private *dev_priv = dev->dev_private;
5357 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305358 struct intel_digital_port *dig_port = NULL;
5359 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005360 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305361 struct intel_crtc *intel_crtc = NULL;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305362 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305363
5364 if (refresh_rate <= 0) {
5365 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5366 return;
5367 }
5368
Vandana Kannan96178ee2015-01-10 02:25:56 +05305369 if (intel_dp == NULL) {
5370 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305371 return;
5372 }
5373
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005374 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005375 * FIXME: This needs proper synchronization with psr state for some
5376 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005377 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305378
Vandana Kannan96178ee2015-01-10 02:25:56 +05305379 dig_port = dp_to_dig_port(intel_dp);
5380 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005381 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305382
5383 if (!intel_crtc) {
5384 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5385 return;
5386 }
5387
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005388 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305389
Vandana Kannan96178ee2015-01-10 02:25:56 +05305390 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305391 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5392 return;
5393 }
5394
Vandana Kannan96178ee2015-01-10 02:25:56 +05305395 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5396 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305397 index = DRRS_LOW_RR;
5398
Vandana Kannan96178ee2015-01-10 02:25:56 +05305399 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305400 DRM_DEBUG_KMS(
5401 "DRRS requested for previously set RR...ignoring\n");
5402 return;
5403 }
5404
5405 if (!intel_crtc->active) {
5406 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5407 return;
5408 }
5409
Durgadoss R44395bf2015-02-13 15:33:02 +05305410 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305411 switch (index) {
5412 case DRRS_HIGH_RR:
5413 intel_dp_set_m_n(intel_crtc, M1_N1);
5414 break;
5415 case DRRS_LOW_RR:
5416 intel_dp_set_m_n(intel_crtc, M2_N2);
5417 break;
5418 case DRRS_MAX_RR:
5419 default:
5420 DRM_ERROR("Unsupported refreshrate type\n");
5421 }
5422 } else if (INTEL_INFO(dev)->gen > 6) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005423 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Ville Syrjälä649636e2015-09-22 19:50:01 +03005424 u32 val;
Vandana Kannana4c30b12015-02-13 15:33:00 +05305425
Ville Syrjälä649636e2015-09-22 19:50:01 +03005426 val = I915_READ(reg);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305427 if (index > DRRS_HIGH_RR) {
Wayne Boyer666a4532015-12-09 12:29:35 -08005428 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305429 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5430 else
5431 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305432 } else {
Wayne Boyer666a4532015-12-09 12:29:35 -08005433 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305434 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5435 else
5436 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305437 }
5438 I915_WRITE(reg, val);
5439 }
5440
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305441 dev_priv->drrs.refresh_rate_type = index;
5442
5443 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5444}
5445
Vandana Kannanb33a2812015-02-13 15:33:03 +05305446/**
5447 * intel_edp_drrs_enable - init drrs struct if supported
5448 * @intel_dp: DP struct
5449 *
5450 * Initializes frontbuffer_bits and drrs.dp
5451 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305452void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5453{
5454 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5455 struct drm_i915_private *dev_priv = dev->dev_private;
5456 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5457 struct drm_crtc *crtc = dig_port->base.base.crtc;
5458 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5459
5460 if (!intel_crtc->config->has_drrs) {
5461 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5462 return;
5463 }
5464
5465 mutex_lock(&dev_priv->drrs.mutex);
5466 if (WARN_ON(dev_priv->drrs.dp)) {
5467 DRM_ERROR("DRRS already enabled\n");
5468 goto unlock;
5469 }
5470
5471 dev_priv->drrs.busy_frontbuffer_bits = 0;
5472
5473 dev_priv->drrs.dp = intel_dp;
5474
5475unlock:
5476 mutex_unlock(&dev_priv->drrs.mutex);
5477}
5478
Vandana Kannanb33a2812015-02-13 15:33:03 +05305479/**
5480 * intel_edp_drrs_disable - Disable DRRS
5481 * @intel_dp: DP struct
5482 *
5483 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305484void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5485{
5486 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5487 struct drm_i915_private *dev_priv = dev->dev_private;
5488 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5489 struct drm_crtc *crtc = dig_port->base.base.crtc;
5490 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5491
5492 if (!intel_crtc->config->has_drrs)
5493 return;
5494
5495 mutex_lock(&dev_priv->drrs.mutex);
5496 if (!dev_priv->drrs.dp) {
5497 mutex_unlock(&dev_priv->drrs.mutex);
5498 return;
5499 }
5500
5501 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5502 intel_dp_set_drrs_state(dev_priv->dev,
5503 intel_dp->attached_connector->panel.
5504 fixed_mode->vrefresh);
5505
5506 dev_priv->drrs.dp = NULL;
5507 mutex_unlock(&dev_priv->drrs.mutex);
5508
5509 cancel_delayed_work_sync(&dev_priv->drrs.work);
5510}
5511
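/*
 * Delayed work that drops to the panel's low refresh rate once the screen
 * has stayed idle (no busy frontbuffer bits) for the timeout.
 */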
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305512static void intel_edp_drrs_downclock_work(struct work_struct *work)
5513{
5514 struct drm_i915_private *dev_priv =
5515 container_of(work, typeof(*dev_priv), drrs.work.work);
5516 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305517
Vandana Kannan96178ee2015-01-10 02:25:56 +05305518 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305519
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305520 intel_dp = dev_priv->drrs.dp;
5521
5522 if (!intel_dp)
5523 goto unlock;
5524
5525 /*
5526 * The delayed work can race with an invalidate hence we need to
5527 * recheck.
5528 */
5529
5530 if (dev_priv->drrs.busy_frontbuffer_bits)
5531 goto unlock;
5532
5533 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5534 intel_dp_set_drrs_state(dev_priv->dev,
5535 intel_dp->attached_connector->panel.
5536 downclock_mode->vrefresh);
5537
5538unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305539 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305540}
5541
Vandana Kannanb33a2812015-02-13 15:33:03 +05305542/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305543 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305544 * @dev: DRM device
5545 * @frontbuffer_bits: frontbuffer plane tracking bits
5546 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305547 * This function gets called every time rendering on the given planes starts.
5548 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305549 *
5550 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5551 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305552void intel_edp_drrs_invalidate(struct drm_device *dev,
5553 unsigned frontbuffer_bits)
5554{
5555 struct drm_i915_private *dev_priv = dev->dev_private;
5556 struct drm_crtc *crtc;
5557 enum pipe pipe;
5558
Daniel Vetter9da7d692015-04-09 16:44:15 +02005559 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305560 return;
5561
Daniel Vetter88f933a2015-04-09 16:44:16 +02005562 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305563
Vandana Kannana93fad02015-01-10 02:25:59 +05305564 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005565 if (!dev_priv->drrs.dp) {
5566 mutex_unlock(&dev_priv->drrs.mutex);
5567 return;
5568 }
5569
Vandana Kannana93fad02015-01-10 02:25:59 +05305570 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5571 pipe = to_intel_crtc(crtc)->pipe;
5572
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005573 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5574 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5575
Ramalingam C0ddfd202015-06-15 20:50:05 +05305576 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005577 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305578 intel_dp_set_drrs_state(dev_priv->dev,
5579 dev_priv->drrs.dp->attached_connector->panel.
5580 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305581
Vandana Kannana93fad02015-01-10 02:25:59 +05305582 mutex_unlock(&dev_priv->drrs.mutex);
5583}
5584
Vandana Kannanb33a2812015-02-13 15:33:03 +05305585/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305586 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305587 * @dev: DRM device
5588 * @frontbuffer_bits: frontbuffer plane tracking bits
5589 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305590 * This function gets called every time rendering on the given planes has
5591 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5592 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
5593 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305594 *
5595 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5596 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305597void intel_edp_drrs_flush(struct drm_device *dev,
5598 unsigned frontbuffer_bits)
5599{
5600 struct drm_i915_private *dev_priv = dev->dev_private;
5601 struct drm_crtc *crtc;
5602 enum pipe pipe;
5603
Daniel Vetter9da7d692015-04-09 16:44:15 +02005604 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305605 return;
5606
Daniel Vetter88f933a2015-04-09 16:44:16 +02005607 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305608
Vandana Kannana93fad02015-01-10 02:25:59 +05305609 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005610 if (!dev_priv->drrs.dp) {
5611 mutex_unlock(&dev_priv->drrs.mutex);
5612 return;
5613 }
5614
Vandana Kannana93fad02015-01-10 02:25:59 +05305615 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5616 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005617
5618 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305619 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5620
Ramalingam C0ddfd202015-06-15 20:50:05 +05305621 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005622 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305623 intel_dp_set_drrs_state(dev_priv->dev,
5624 dev_priv->drrs.dp->attached_connector->panel.
5625 fixed_mode->vrefresh);
5626
5627 /*
5628 * flush also means no more activity hence schedule downclock, if all
5629 * other fbs are quiescent too
5630 */
5631 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305632 schedule_delayed_work(&dev_priv->drrs.work,
5633 msecs_to_jiffies(1000));
5634 mutex_unlock(&dev_priv->drrs.mutex);
5635}
5636
Vandana Kannanb33a2812015-02-13 15:33:03 +05305637/**
5638 * DOC: Display Refresh Rate Switching (DRRS)
5639 *
5640 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5641 * which enables switching between low and high refresh rates
5642 * dynamically, based on the usage scenario. This feature is applicable
5643 * to internal panels.
5644 *
5645 * Indication that the panel supports DRRS is given by the panel EDID, which
5646 * would list multiple refresh rates for one resolution.
5647 *
5648 * DRRS is of 2 types - static and seamless.
5649 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5650 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5651 * Seamless DRRS involves changing RR without any visual effect to the user
5652 * and can be used during normal system usage. This is done by programming
5653 * certain registers.
5654 *
5655 * Support for static/seamless DRRS may be indicated in the VBT based on
5656 * inputs from the panel spec.
5657 *
5658 * DRRS saves power by switching to low RR based on usage scenarios.
5659 *
5660 * eDP DRRS:-
5661 * The implementation is based on frontbuffer tracking implementation.
5662 * When there is a disturbance on the screen triggered by user activity or a
5663 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5664 * When there is no movement on screen, after a timeout of 1 second, a switch
5665 * to low RR is made.
5666 * For integration with frontbuffer tracking code,
5667 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5668 *
5669 * DRRS can be further extended to support other internal panels and also
5670 * the scenario of video playback wherein RR is set based on the rate
5671 * requested by userspace.
5672 */
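
/*
 * Illustrative sketch only (not the actual frontbuffer tracking code):
 * a caller rendering to planes on a pipe would bracket the access
 * roughly as follows, using the helpers defined above:
 *
 *	unsigned bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);
 *
 *	intel_edp_drrs_invalidate(dev, bits);
 *	(rendering or flips touch the frontbuffer here, at high RR)
 *	intel_edp_drrs_flush(dev, bits);
 *
 * The flush restarts the idleness timer, which switches back to the low
 * refresh rate after one second without further activity.
 */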
5673
5674/**
5675 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5676 * @intel_connector: eDP connector
5677 * @fixed_mode: preferred mode of panel
5678 *
5679 * This function is called only once at driver load to initialize basic
5680 * DRRS stuff.
5681 *
5682 * Returns:
5683 * Downclock mode if panel supports it, else return NULL.
5684 * DRRS support is determined by the presence of downclock mode (apart
5685 * from VBT setting).
5686 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305687static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305688intel_dp_drrs_init(struct intel_connector *intel_connector,
5689 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305690{
5691 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305692 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305693 struct drm_i915_private *dev_priv = dev->dev_private;
5694 struct drm_display_mode *downclock_mode = NULL;
5695
Daniel Vetter9da7d692015-04-09 16:44:15 +02005696 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5697 mutex_init(&dev_priv->drrs.mutex);
5698
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305699 if (INTEL_INFO(dev)->gen <= 6) {
5700		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5701 return NULL;
5702 }
5703
5704 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005705 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305706 return NULL;
5707 }
5708
5709 downclock_mode = intel_find_panel_downclock
5710 (dev, fixed_mode, connector);
5711
5712 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305713 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305714 return NULL;
5715 }
5716
Vandana Kannan96178ee2015-01-10 02:25:56 +05305717 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305718
Vandana Kannan96178ee2015-01-10 02:25:56 +05305719 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005720 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305721 return downclock_mode;
5722}
5723
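/*
 * One-time eDP connector setup: verify the panel answers over DPCD
 * (otherwise treat it as a ghost), program the power sequencer, cache
 * the EDID, pick the fixed (and optional downclock) mode, and set up
 * the backlight.
 */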
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005724static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005725 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005726{
5727 struct drm_connector *connector = &intel_connector->base;
5728 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005729 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5730 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005731 struct drm_i915_private *dev_priv = dev->dev_private;
5732 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305733 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005734 bool has_dpcd;
5735 struct drm_display_mode *scan;
5736 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005737 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005738
5739 if (!is_edp(intel_dp))
5740 return true;
5741
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005742 pps_lock(intel_dp);
5743 intel_edp_panel_vdd_sanitize(intel_dp);
5744 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005745
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005746 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005747 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005748
5749 if (has_dpcd) {
5750 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5751 dev_priv->no_aux_handshake =
5752 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5753 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5754 } else {
5755 /* if this fails, presume the device is a ghost */
5756 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005757 return false;
5758 }
5759
5760 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005761 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005762 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005763 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005764
Daniel Vetter060c8772014-03-21 23:22:35 +01005765 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005766 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005767 if (edid) {
5768 if (drm_add_edid_modes(connector, edid)) {
5769 drm_mode_connector_update_edid_property(connector,
5770 edid);
5771 drm_edid_to_eld(connector, edid);
5772 } else {
5773 kfree(edid);
5774 edid = ERR_PTR(-EINVAL);
5775 }
5776 } else {
5777 edid = ERR_PTR(-ENOENT);
5778 }
5779 intel_connector->edid = edid;
5780
5781 /* prefer fixed mode from EDID if available */
5782 list_for_each_entry(scan, &connector->probed_modes, head) {
5783 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5784 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305785 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305786 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005787 break;
5788 }
5789 }
5790
5791 /* fallback to VBT if available for eDP */
5792 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5793 fixed_mode = drm_mode_duplicate(dev,
5794 dev_priv->vbt.lfp_lvds_vbt_mode);
5795 if (fixed_mode)
5796 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5797 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005798 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005799
Wayne Boyer666a4532015-12-09 12:29:35 -08005800 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
Clint Taylor01527b32014-07-07 13:01:46 -07005801 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5802 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005803
5804 /*
5805 * Figure out the current pipe for the initial backlight setup.
5806 * If the current pipe isn't valid, try the PPS pipe, and if that
5807 * fails just assume pipe A.
5808 */
5809 if (IS_CHERRYVIEW(dev))
5810 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5811 else
5812 pipe = PORT_TO_PIPE(intel_dp->DP);
5813
5814 if (pipe != PIPE_A && pipe != PIPE_B)
5815 pipe = intel_dp->pps_pipe;
5816
5817 if (pipe != PIPE_A && pipe != PIPE_B)
5818 pipe = PIPE_A;
5819
5820 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5821 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005822 }
5823
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305824 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula5507fae2015-09-14 14:03:48 +03005825 intel_connector->panel.backlight.power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005826 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005827
5828 return true;
5829}
5830
Paulo Zanoni16c25532013-06-12 17:27:25 -03005831bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005832intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5833 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005834{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005835 struct drm_connector *connector = &intel_connector->base;
5836 struct intel_dp *intel_dp = &intel_dig_port->dp;
5837 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5838 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005839 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005840 enum port port = intel_dig_port->port;
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02005841 int type, ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005842
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005843 intel_dp->pps_pipe = INVALID_PIPE;
5844
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005845	/* Per-platform intel_dp vfuncs: AUX clock divider, AUX send control, link retraining. */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005846 if (INTEL_INFO(dev)->gen >= 9)
5847 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
Wayne Boyer666a4532015-12-09 12:29:35 -08005848 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005849 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5850 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5851 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5852 else if (HAS_PCH_SPLIT(dev))
5853 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5854 else
5855 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5856
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005857 if (INTEL_INFO(dev)->gen >= 9)
5858 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5859 else
5860 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005861
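	/* DDI ports get a hook to prepare the port for link retraining. */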
Ander Conselvan de Oliveiraad642172015-10-23 13:01:49 +03005862 if (HAS_DDI(dev))
5863 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5864
Daniel Vetter07679352012-09-06 22:15:42 +02005865 /* Preserve the current hw state. */
5866 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005867 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005868
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005869 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305870 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005871 else
5872 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005873
Imre Deakf7d24902013-05-08 13:14:05 +03005874 /*
5875 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5876 * for DP the encoder type can be set by the caller to
5877 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5878 */
5879 if (type == DRM_MODE_CONNECTOR_eDP)
5880 intel_encoder->type = INTEL_OUTPUT_EDP;
5881
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005882 /* eDP only on port B and/or C on vlv/chv */
Wayne Boyer666a4532015-12-09 12:29:35 -08005883 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5884 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005885 return false;
5886
Imre Deake7281ea2013-05-08 13:14:08 +03005887 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5888 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5889 port_name(port));
5890
Adam Jacksonb3295302010-07-16 14:46:28 -04005891 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005892 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5893
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005894 connector->interlace_allowed = true;
5895 connector->doublescan_allowed = 0;
Ma Lingf8aed702009-08-24 13:50:24 +08005896
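	/*
	 * Delayed work used to drop the forced panel VDD a little while after
	 * it was needed (e.g. for AUX transfers).
	 */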
Daniel Vetter66a92782012-07-12 20:08:18 +02005897 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
Daniel Vetter4be73782014-01-17 14:39:48 +01005898 edp_panel_vdd_work);
Zhenyu Wang6251ec02010-01-12 05:38:32 +08005899
Chris Wilsondf0e9242010-09-09 16:20:55 +01005900 intel_connector_attach_encoder(intel_connector, intel_encoder);
Thomas Wood34ea3d32014-05-29 16:57:41 +01005901 drm_connector_register(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005902
Paulo Zanoniaffa9352012-11-23 15:30:39 -02005903 if (HAS_DDI(dev))
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005904 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5905 else
5906 intel_connector->get_hw_state = intel_connector_get_hw_state;
Imre Deak80f65de2014-02-11 17:12:49 +02005907 intel_connector->unregister = intel_dp_connector_unregister;
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005908
Jani Nikula0b998362014-03-14 16:51:17 +02005909 /* Set up the hotplug pin. */
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005910 switch (port) {
5911 case PORT_A:
Egbert Eich1d843f92013-02-25 12:06:49 -05005912 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005913 break;
5914 case PORT_B:
Egbert Eich1d843f92013-02-25 12:06:49 -05005915 intel_encoder->hpd_pin = HPD_PORT_B;
Jani Nikulae87a0052015-10-20 15:22:02 +03005916 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
Sonika Jindalcf1d5882015-08-10 10:35:36 +05305917 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005918 break;
5919 case PORT_C:
Egbert Eich1d843f92013-02-25 12:06:49 -05005920 intel_encoder->hpd_pin = HPD_PORT_C;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005921 break;
5922 case PORT_D:
Egbert Eich1d843f92013-02-25 12:06:49 -05005923 intel_encoder->hpd_pin = HPD_PORT_D;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005924 break;
Xiong Zhang26951ca2015-08-17 15:55:50 +08005925 case PORT_E:
5926 intel_encoder->hpd_pin = HPD_PORT_E;
5927 break;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005928 default:
Damien Lespiauad1c0b12013-03-07 15:30:28 +00005929 BUG();
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08005930 }
5931
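	/*
	 * For eDP, set up the panel power sequencer (power timestamps and, on
	 * VLV/CHV, the pipe whose PPS is in use) under the PPS lock before the
	 * AUX channel is brought up.
	 */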
Imre Deakdada1a92014-01-29 13:25:41 +02005932 if (is_edp(intel_dp)) {
Ville Syrjälä773538e82014-09-04 14:54:56 +03005933 pps_lock(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005934 intel_dp_init_panel_power_timestamps(intel_dp);
Wayne Boyer666a4532015-12-09 12:29:35 -08005935 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005936 vlv_initial_power_sequencer_setup(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005937 else
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005938 intel_dp_init_panel_power_sequencer(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005939 pps_unlock(intel_dp);
Imre Deakdada1a92014-01-29 13:25:41 +02005940 }
Paulo Zanoni0095e6d2013-12-19 14:29:39 -02005941
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02005942 ret = intel_dp_aux_init(intel_dp, intel_connector);
5943 if (ret)
5944 goto fail;
Dave Airliec1f05262012-08-30 11:06:18 +10005945
Dave Airlie0e32b392014-05-02 14:02:48 +10005946 /* init MST on ports that can support it */
Jani Nikula0c9b3712015-05-18 17:10:01 +03005947 if (HAS_DP_MST(dev) &&
5948 (port == PORT_B || port == PORT_C || port == PORT_D))
5949 intel_dp_mst_encoder_init(intel_dig_port,
5950 intel_connector->base.base.id);
Dave Airlie0e32b392014-05-02 14:02:48 +10005951
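	/*
	 * Probe the eDP panel (power sequencer, EDID, fixed mode); on failure
	 * tear down the AUX channel and any MST state before bailing out.
	 */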
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005952 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02005953 intel_dp_aux_fini(intel_dp);
5954 intel_dp_mst_encoder_cleanup(intel_dig_port);
5955 goto fail;
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005956 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08005957
Chris Wilsonf6849602010-09-19 09:29:33 +01005958 intel_dp_add_properties(intel_dp, connector);
5959
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005960	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
 5961	 * written with 0xd.  Failing to do so results in spurious interrupts
 5962	 * being generated on the port when a cable is not attached.
 5963	 */
5964 if (IS_G4X(dev) && !IS_GM45(dev)) {
5965 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5966 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5967 }
Paulo Zanoni16c25532013-06-12 17:27:25 -03005968
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005969 i915_debugfs_connector_add(connector);
5970
Paulo Zanoni16c25532013-06-12 17:27:25 -03005971 return true;
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02005972
5973fail:
5974 if (is_edp(intel_dp)) {
5975 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5976 /*
5977		 * vdd might still be enabled due to the delayed vdd off.
5978 * Make sure vdd is actually turned off here.
5979 */
5980 pps_lock(intel_dp);
5981 edp_panel_vdd_off_sync(intel_dp);
5982 pps_unlock(intel_dp);
5983 }
5984 drm_connector_unregister(connector);
5985 drm_connector_cleanup(connector);
5986
5987 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005988}
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005989
5990void
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005991intel_dp_init(struct drm_device *dev,
5992 i915_reg_t output_reg, enum port port)
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005993{
Dave Airlie13cf5502014-06-18 11:29:35 +10005994 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005995 struct intel_digital_port *intel_dig_port;
5996 struct intel_encoder *intel_encoder;
5997 struct drm_encoder *encoder;
5998 struct intel_connector *intel_connector;
5999
Daniel Vetterb14c5672013-09-19 12:18:32 +02006000 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006001 if (!intel_dig_port)
6002 return;
6003
Ander Conselvan de Oliveira08d9bc92015-04-10 10:59:10 +03006004 intel_connector = intel_connector_alloc();
Sudip Mukherjee11aee0f2015-10-08 19:27:59 +05306005 if (!intel_connector)
6006 goto err_connector_alloc;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006007
6008 intel_encoder = &intel_dig_port->base;
6009 encoder = &intel_encoder->base;
6010
Sudip Mukherjee893da0c2015-10-08 19:28:00 +05306011 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6012 DRM_MODE_ENCODER_TMDS))
6013 goto err_encoder_init;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006014
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01006015 intel_encoder->compute_config = intel_dp_compute_config;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006016 intel_encoder->disable = intel_disable_dp;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006017 intel_encoder->get_hw_state = intel_dp_get_hw_state;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07006018 intel_encoder->get_config = intel_dp_get_config;
Imre Deak07f9cd02014-08-18 14:42:45 +03006019 intel_encoder->suspend = intel_dp_encoder_suspend;
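	/*
	 * CHV and VLV have their own pre/post enable sequences; other
	 * platforms use the g4x paths, plus an ILK-style post-disable on
	 * gen5 and newer.
	 */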
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006020 if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä9197c882014-04-09 13:29:05 +03006021 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006022 intel_encoder->pre_enable = chv_pre_enable_dp;
6023 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä580d3812014-04-09 13:29:00 +03006024 intel_encoder->post_disable = chv_post_disable_dp;
Ville Syrjäläd6db9952015-07-08 23:45:49 +03006025 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006026 } else if (IS_VALLEYVIEW(dev)) {
Jani Nikulaecff4f32013-09-06 07:38:29 +03006027 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006028 intel_encoder->pre_enable = vlv_pre_enable_dp;
6029 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä49277c32014-03-31 18:21:26 +03006030 intel_encoder->post_disable = vlv_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006031 } else {
Jani Nikulaecff4f32013-09-06 07:38:29 +03006032 intel_encoder->pre_enable = g4x_pre_enable_dp;
6033 intel_encoder->enable = g4x_enable_dp;
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03006034 if (INTEL_INFO(dev)->gen >= 5)
6035 intel_encoder->post_disable = ilk_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006036 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006037
Paulo Zanoni174edf12012-10-26 19:05:50 -02006038 intel_dig_port->port = port;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006039 intel_dig_port->dp.output_reg = output_reg;
6040
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006041 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
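	/*
	 * On CHV, port D can only be driven by pipe C; the other ports can
	 * use pipes A/B, and non-CHV platforms can use any pipe.
	 */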
Ville Syrjälä882ec382014-04-28 14:07:43 +03006042 if (IS_CHERRYVIEW(dev)) {
6043 if (port == PORT_D)
6044 intel_encoder->crtc_mask = 1 << 2;
6045 else
6046 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6047 } else {
6048 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6049 }
Ville Syrjäläbc079e82014-03-03 16:15:28 +02006050 intel_encoder->cloneable = 0;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006051
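	/* Register this port for long/short HPD pulse handling. */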
Dave Airlie13cf5502014-06-18 11:29:35 +10006052 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
Jani Nikula5fcece82015-05-27 15:03:42 +03006053 dev_priv->hotplug.irq_port[port] = intel_dig_port;
Dave Airlie13cf5502014-06-18 11:29:35 +10006054
Sudip Mukherjee11aee0f2015-10-08 19:27:59 +05306055 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6056 goto err_init_connector;
6057
6058 return;
6059
6060err_init_connector:
6061 drm_encoder_cleanup(encoder);
Sudip Mukherjee893da0c2015-10-08 19:28:00 +05306062err_encoder_init:
Sudip Mukherjee11aee0f2015-10-08 19:27:59 +05306063 kfree(intel_connector);
6064err_connector_alloc:
6065 kfree(intel_dig_port);
6066
6067 return;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006068}
Dave Airlie0e32b392014-05-02 14:02:48 +10006069
6070void intel_dp_mst_suspend(struct drm_device *dev)
6071{
6072 struct drm_i915_private *dev_priv = dev->dev_private;
6073 int i;
6074
6075	/* suspend the MST topology manager on every port with MST enabled */
6076 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006077 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006078 if (!intel_dig_port)
6079 continue;
6080
6081 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6082 if (!intel_dig_port->dp.can_mst)
6083 continue;
6084 if (intel_dig_port->dp.is_mst)
6085 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6086 }
6087 }
6088}
6089
6090void intel_dp_mst_resume(struct drm_device *dev)
6091{
6092 struct drm_i915_private *dev_priv = dev->dev_private;
6093 int i;
6094
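	/*
	 * Resume the MST topology manager on ports that had MST enabled; if
	 * resuming fails, fall back to re-checking the MST status.
	 */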
6095 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006096 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006097 if (!intel_dig_port)
6098 continue;
6099 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6100 int ret;
6101
6102 if (!intel_dig_port->dp.can_mst)
6103 continue;
6104
6105 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6106 if (ret != 0) {
6107 intel_dp_check_mst_status(&intel_dig_port->dp);
6108 }
6109 }
6110 }
6111}