/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only the fixed rates are provided; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
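	/*
	 * For illustration: the 162000 entry below encodes m2_int = 32 and
	 * m2_fraction = 1677722, i.e. (32 << 22) | 1677722 == 0x819999a.
	 */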
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

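/*
 * A small illustrative note: for lane_count == 2 the helper below returns
 * ~0x3 & 0xf == 0xc, i.e. a mask with lanes 2 and 3 marked as unused.
 */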
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
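
/*
 * Continuing the example above as a quick sanity check:
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200,
 * which is <= intel_dp_max_data_rate(270000, 1) == 216000, so a 1680x1050R
 * mode at 18bpp fits on a single 2.7GHz lane.
 */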

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
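
/*
 * For illustration: intel_dp_pack_aux() puts the first byte in the most
 * significant position, so {0x12, 0x34, 0x56, 0x78} packs to 0x12345678,
 * and intel_dp_unpack_aux(0x12345678, dst, 4) recovers the same four bytes.
 */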

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
313 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
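/*
 * A sketch of the AUX message header assembled below: txbuf[0] carries the
 * request type in the high nibble and address bits 19:16 in the low nibble,
 * txbuf[1] and txbuf[2] carry address bits 15:8 and 7:0, and txbuf[3] holds
 * (size - 1).
 */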
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

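	/*
	 * For illustration, assuming the standard DPCD encodings 0x06/0x0a/0x14
	 * for DP_LINK_BW_1_62/2_7/5_4: (code >> 3) + 1 yields 1, 2 or 3, i.e.
	 * how many entries of default_rates the sink can use.
	 */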
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
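	/*
	 * For illustration: on a source without HBR2, dropping the last entry
	 * trims default_rates from { 162000, 270000, 540000 } down to
	 * { 162000, 270000 }.
	 */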
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301221 if (!intel_dp_source_supports_hbr2(dev))
1222 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001223
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301224 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301225}
1226
Daniel Vetter0e503382014-07-04 11:26:04 -03001227static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001228intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001229 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001230{
1231 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001232 const struct dp_link_dpll *divisor = NULL;
1233 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001234
1235 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001236 divisor = gen4_dpll;
1237 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001238 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001239 divisor = pch_dpll;
1240 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001241 } else if (IS_CHERRYVIEW(dev)) {
1242 divisor = chv_dpll;
1243 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001244 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001245 divisor = vlv_dpll;
1246 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001247 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001248
1249 if (divisor && count) {
1250 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001251 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001252 pipe_config->dpll = divisor[i].dpll;
1253 pipe_config->clock_set = true;
1254 break;
1255 }
1256 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001257 }
1258}
1259
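/*
 * Both rate lists are sorted in ascending order, so a single merge-style walk
 * suffices. For example, source {162000, 270000, 540000} intersected with
 * sink {162000, 216000, 270000} yields the common set {162000, 270000}.
 */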
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001260static int intersect_rates(const int *source_rates, int source_len,
1261 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001262 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301263{
1264 int i = 0, j = 0, k = 0;
1265
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301266 while (i < source_len && j < sink_len) {
1267 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001268 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1269 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001270 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301271 ++k;
1272 ++i;
1273 ++j;
1274 } else if (source_rates[i] < sink_rates[j]) {
1275 ++i;
1276 } else {
1277 ++j;
1278 }
1279 }
1280 return k;
1281}
1282
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001283static int intel_dp_common_rates(struct intel_dp *intel_dp,
1284 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001285{
1286 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1287 const int *source_rates, *sink_rates;
1288 int source_len, sink_len;
1289
1290 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1291 source_len = intel_dp_source_rates(dev, &source_rates);
1292
1293 return intersect_rates(source_rates, source_len,
1294 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001295 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001296}
1297
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001298static void snprintf_int_array(char *str, size_t len,
1299 const int *array, int nelem)
1300{
1301 int i;
1302
1303 str[0] = '\0';
1304
1305 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001306 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001307 if (r >= len)
1308 return;
1309 str += r;
1310 len -= r;
1311 }
1312}
1313
1314static void intel_dp_print_rates(struct intel_dp *intel_dp)
1315{
1316 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1317 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001318 int source_len, sink_len, common_len;
1319 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001320 char str[128]; /* FIXME: too big for stack? */
1321
1322 if ((drm_debug & DRM_UT_KMS) == 0)
1323 return;
1324
1325 source_len = intel_dp_source_rates(dev, &source_rates);
1326 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1327 DRM_DEBUG_KMS("source rates: %s\n", str);
1328
1329 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1330 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1331 DRM_DEBUG_KMS("sink rates: %s\n", str);
1332
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001333 common_len = intel_dp_common_rates(intel_dp, common_rates);
1334 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1335 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001336}
1337
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001338static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301339{
1340 int i = 0;
1341
1342 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1343 if (find == rates[i])
1344 break;
1345
1346 return i;
1347}
1348
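/*
 * rates[] below is zero-initialized and filled with ascending common rates,
 * so rate_to_index(0, rates) doubles as "number of valid entries"; the
 * highest common rate is therefore the entry just before that index.
 */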
Ville Syrjälä50fec212015-03-12 17:10:34 +02001349int
1350intel_dp_max_link_rate(struct intel_dp *intel_dp)
1351{
1352 int rates[DP_MAX_SUPPORTED_RATES] = {};
1353 int len;
1354
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001355 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001356 if (WARN_ON(len <= 0))
1357 return 162000;
1358
1359 return rates[rate_to_index(0, rates) - 1];
1360}
1361
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001362int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1363{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001364 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001365}
1366
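/*
 * For sinks that advertise an explicit rate table the link is selected by
 * index (rate_select, written to DP_LINK_RATE_SET during link training) with
 * the link bw code left at zero; legacy sinks use the classic link bw code
 * and rate_select stays zero.
 */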
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001367static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1368 uint8_t *link_bw, uint8_t *rate_select)
1369{
1370 if (intel_dp->num_sink_rates) {
1371 *link_bw = 0;
1372 *rate_select =
1373 intel_dp_rate_select(intel_dp, port_clock);
1374 } else {
1375 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1376 *rate_select = 0;
1377 }
1378}
1379
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001380bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001381intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001382 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001383{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001384 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001385 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001386 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001388 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001389 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001390 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001391 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001392 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001393 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001394	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001395 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301396 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001397 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001398 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001399 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1400 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001401 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301402
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001403 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301404
1405 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001406 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301407
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001408 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001409
Imre Deakbc7d38a2013-05-16 14:40:36 +03001410 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001411 pipe_config->has_pch_encoder = true;
1412
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001413 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001414 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001415 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001416
Jani Nikuladd06f902012-10-19 14:51:50 +03001417 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1418 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1419 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001420
1421 if (INTEL_INFO(dev)->gen >= 9) {
1422 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001423 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001424 if (ret)
1425 return ret;
1426 }
1427
Jesse Barnes2dd24552013-04-25 12:55:01 -07001428 if (!HAS_PCH_SPLIT(dev))
1429 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1430 intel_connector->panel.fitting_mode);
1431 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001432 intel_pch_panel_fitting(intel_crtc, pipe_config,
1433 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001434 }
1435
Daniel Vettercb1793c2012-06-04 18:39:21 +02001436 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001437 return false;
1438
Daniel Vetter083f9562012-04-20 20:23:49 +02001439 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301440 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001441 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001442 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001443
Daniel Vetter36008362013-03-27 00:44:59 +01001444 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1445 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001446 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001447 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301448
1449		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1450 if (intel_connector->base.display_info.bpc == 0 &&
1451 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001452 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1453 dev_priv->vbt.edp_bpp);
1454 bpp = dev_priv->vbt.edp_bpp;
1455 }
1456
Jani Nikula344c5bb2014-09-09 11:25:13 +03001457 /*
1458 * Use the maximum clock and number of lanes the eDP panel
1459	 * advertises being capable of. The panels are generally
1460 * designed to support only a single clock and lane
1461 * configuration, and typically these values correspond to the
1462 * native resolution of the panel.
1463 */
1464 min_lane_count = max_lane_count;
1465 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001466 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001467
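	/*
	 * Rough link budget for orientation (plain DP arithmetic, independent
	 * of the exact units used by intel_dp_link_required() and
	 * intel_dp_max_data_rate()): a 148500 kHz mode at 24 bpp needs about
	 * 3.6 Gbps of payload; 2 lanes at HBR give 2 * 2.7 * 0.8 = 4.32 Gbps
	 * after 8b/10b, and 4 lanes at RBR give 5.18 Gbps. The walk below
	 * picks the lowest link clock, then the fewest lanes at that clock,
	 * that still fits the mode.
	 */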
Daniel Vetter36008362013-03-27 00:44:59 +01001468 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001469 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1470 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001471
Dave Airliec6930992014-07-14 11:04:39 +10001472 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301473 for (lane_count = min_lane_count;
1474 lane_count <= max_lane_count;
1475 lane_count <<= 1) {
1476
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001477 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001478 link_avail = intel_dp_max_data_rate(link_clock,
1479 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001480
Daniel Vetter36008362013-03-27 00:44:59 +01001481 if (mode_rate <= link_avail) {
1482 goto found;
1483 }
1484 }
1485 }
1486 }
1487
1488 return false;
1489
1490found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001491 if (intel_dp->color_range_auto) {
1492 /*
1493 * See:
1494 * CEA-861-E - 5.1 Default Encoding Parameters
1495 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1496 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001497 pipe_config->limited_color_range =
1498 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1499 } else {
1500 pipe_config->limited_color_range =
1501 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001502 }
1503
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001504 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301505
Daniel Vetter657445f2013-05-04 10:09:18 +02001506 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001507 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001508
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001509 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1510 &link_bw, &rate_select);
1511
1512 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1513 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001514 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001515 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1516 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001517
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001518 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001519 adjusted_mode->crtc_clock,
1520 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001521 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001522
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301523 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301524 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001525 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301526 intel_link_compute_m_n(bpp, lane_count,
1527 intel_connector->panel.downclock_mode->clock,
1528 pipe_config->port_clock,
1529 &pipe_config->dp_m2_n2);
1530 }
1531
Damien Lespiau5416d872014-11-14 17:24:33 +00001532 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001533 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301534 else if (IS_BROXTON(dev))
1535 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001536 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001537 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001538 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001539 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001540
Daniel Vetter36008362013-03-27 00:44:59 +01001541 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001542}
1543
Daniel Vetter7c62a162013-06-01 17:16:20 +02001544static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001545{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001546 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1547 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1548 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001549 struct drm_i915_private *dev_priv = dev->dev_private;
1550 u32 dpa_ctl;
1551
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001552 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1553 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001554 dpa_ctl = I915_READ(DP_A);
1555 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1556
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001557 if (crtc->config->port_clock == 162000) {
Daniel Vetter1ce17032012-11-29 15:59:32 +01001558		/* For a long time we've carried around an ILK-DevA w/a for the
1559 * 160MHz clock. If we're really unlucky, it's still required.
1560 */
1561 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
Daniel Vetterea9b6002012-11-29 15:59:31 +01001562 dpa_ctl |= DP_PLL_FREQ_160MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001563 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001564 } else {
1565 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001566 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001567 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001568
Daniel Vetterea9b6002012-11-29 15:59:31 +01001569 I915_WRITE(DP_A, dpa_ctl);
1570
1571 POSTING_READ(DP_A);
1572 udelay(500);
1573}
1574
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001575void intel_dp_set_link_params(struct intel_dp *intel_dp,
1576 const struct intel_crtc_state *pipe_config)
1577{
1578 intel_dp->link_rate = pipe_config->port_clock;
1579 intel_dp->lane_count = pipe_config->lane_count;
1580}
1581
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001582static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001583{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001584 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001585 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001586 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001587 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001588 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001589 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001590
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001591 intel_dp_set_link_params(intel_dp, crtc->config);
1592
Keith Packard417e8222011-11-01 19:54:11 -07001593 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001594 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001595 *
1596 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001597 * SNB CPU
1598 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001599 * CPT PCH
1600 *
1601 * IBX PCH and CPU are the same for almost everything,
1602 * except that the CPU DP PLL is configured in this
1603 * register
1604 *
1605 * CPT PCH is quite different, having many bits moved
1606 * to the TRANS_DP_CTL register instead. That
1607 * configuration happens (oddly) in ironlake_pch_enable
1608 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001609
Keith Packard417e8222011-11-01 19:54:11 -07001610 /* Preserve the BIOS-computed detected bit. This is
1611 * supposed to be read-only.
1612 */
1613 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001614
Keith Packard417e8222011-11-01 19:54:11 -07001615 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001616 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001617 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001618
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001619 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001620 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001621
Keith Packard417e8222011-11-01 19:54:11 -07001622 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001623
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001624 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001625 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1626 intel_dp->DP |= DP_SYNC_HS_HIGH;
1627 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1628 intel_dp->DP |= DP_SYNC_VS_HIGH;
1629 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1630
Jani Nikula6aba5b62013-10-04 15:08:10 +03001631 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001632 intel_dp->DP |= DP_ENHANCED_FRAMING;
1633
Daniel Vetter7c62a162013-06-01 17:16:20 +02001634 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001635 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001636 u32 trans_dp;
1637
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001638 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001639
1640 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1641 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1642 trans_dp |= TRANS_DP_ENH_FRAMING;
1643 else
1644 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1645 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001646 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001647 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1648 crtc->config->limited_color_range)
1649 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001650
1651 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1652 intel_dp->DP |= DP_SYNC_HS_HIGH;
1653 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1654 intel_dp->DP |= DP_SYNC_VS_HIGH;
1655 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1656
Jani Nikula6aba5b62013-10-04 15:08:10 +03001657 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001658 intel_dp->DP |= DP_ENHANCED_FRAMING;
1659
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001660 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001661 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001662 else if (crtc->pipe == PIPE_B)
1663 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001664 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001665}
1666
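/*
 * The (mask, value) pairs below are matched against the panel power status
 * register by wait_panel_status(); the "| 0" terms only keep the unused
 * fields visible in the layout.
 */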
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001667#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1668#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001669
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001670#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1671#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001672
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001673#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1674#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001675
Daniel Vetter4be73782014-01-17 14:39:48 +01001676static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001677 u32 mask,
1678 u32 value)
1679{
Paulo Zanoni30add222012-10-26 19:05:45 -02001680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001681 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001682 u32 pp_stat_reg, pp_ctrl_reg;
1683
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001684 lockdep_assert_held(&dev_priv->pps_mutex);
1685
Jani Nikulabf13e812013-09-06 07:40:05 +03001686 pp_stat_reg = _pp_stat_reg(intel_dp);
1687 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001688
1689 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001690 mask, value,
1691 I915_READ(pp_stat_reg),
1692 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001693
Jesse Barnes453c5422013-03-28 09:55:41 -07001694 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001695 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001698 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001699
1700 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001701}
1702
Daniel Vetter4be73782014-01-17 14:39:48 +01001703static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001704{
1705 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001706 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001707}
1708
Daniel Vetter4be73782014-01-17 14:39:48 +01001709static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001710{
Keith Packardbd943152011-09-18 23:09:52 -07001711 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001712 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001713}
Keith Packardbd943152011-09-18 23:09:52 -07001714
Daniel Vetter4be73782014-01-17 14:39:48 +01001715static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001716{
1717 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001718
1719	/* When we disable the VDD override bit last, we have to do the panel
1720	 * power cycle wait manually. */
1721 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1722 intel_dp->panel_power_cycle_delay);
1723
Daniel Vetter4be73782014-01-17 14:39:48 +01001724 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001725}
Keith Packardbd943152011-09-18 23:09:52 -07001726
Daniel Vetter4be73782014-01-17 14:39:48 +01001727static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001728{
1729 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1730 intel_dp->backlight_on_delay);
1731}
1732
Daniel Vetter4be73782014-01-17 14:39:48 +01001733static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001734{
1735 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1736 intel_dp->backlight_off_delay);
1737}
Keith Packard99ea7122011-11-01 19:57:50 -07001738
Keith Packard832dd3c2011-11-01 19:34:06 -07001739/* Read the current pp_control value, unlocking the register if it
1740 * is locked
1741 */
1742
Jesse Barnes453c5422013-03-28 09:55:41 -07001743static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001744{
Jesse Barnes453c5422013-03-28 09:55:41 -07001745 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1746 struct drm_i915_private *dev_priv = dev->dev_private;
1747 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001748
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001749 lockdep_assert_held(&dev_priv->pps_mutex);
1750
Jani Nikulabf13e812013-09-06 07:40:05 +03001751 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301752 if (!IS_BROXTON(dev)) {
1753 control &= ~PANEL_UNLOCK_MASK;
1754 control |= PANEL_UNLOCK_REGS;
1755 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001756 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001757}
1758
Ville Syrjälä951468f2014-09-04 14:55:31 +03001759/*
1760 * Must be paired with edp_panel_vdd_off().
1761 * Must hold pps_mutex around the whole on/off sequence.
1762 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763 */
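/*
 * Typical call pattern (illustrative sketch, not a verbatim caller from this
 * file):
 *
 *	pps_lock(intel_dp);
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... touch AUX / panel registers ...
 *	edp_panel_vdd_off(intel_dp, false);
 *	pps_unlock(intel_dp);
 */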
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001764static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001765{
Paulo Zanoni30add222012-10-26 19:05:45 -02001766 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001767 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1768 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001769 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001770 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001771 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001772 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001773 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001774
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
Keith Packard97af61f572011-09-28 16:23:51 -07001777 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001778 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001779
Egbert Eich2c623c12014-11-25 12:54:57 +01001780 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001781 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001782
Daniel Vetter4be73782014-01-17 14:39:48 +01001783 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001784 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001785
Imre Deak4e6e1a52014-03-27 17:45:11 +02001786 power_domain = intel_display_port_power_domain(intel_encoder);
1787 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001788
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001789 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1790 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001791
Daniel Vetter4be73782014-01-17 14:39:48 +01001792 if (!edp_have_panel_power(intel_dp))
1793 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001794
Jesse Barnes453c5422013-03-28 09:55:41 -07001795 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001796 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001797
Jani Nikulabf13e812013-09-06 07:40:05 +03001798 pp_stat_reg = _pp_stat_reg(intel_dp);
1799 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001800
1801 I915_WRITE(pp_ctrl_reg, pp);
1802 POSTING_READ(pp_ctrl_reg);
1803 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1804 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001805 /*
1806 * If the panel wasn't on, delay before accessing aux channel
1807 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001808 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001809 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1810 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001811 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001812 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001813
1814 return need_to_disable;
1815}
1816
Ville Syrjälä951468f2014-09-04 14:55:31 +03001817/*
1818 * Must be paired with intel_edp_panel_vdd_off() or
1819 * intel_edp_panel_off().
1820 * Nested calls to these functions are not allowed since
1821 * we drop the lock. Caller must use some higher level
1822 * locking to prevent nested calls from other threads.
1823 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001824void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001825{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001826 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001827
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001828 if (!is_edp(intel_dp))
1829 return;
1830
Ville Syrjälä773538e82014-09-04 14:54:56 +03001831 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001832 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001833 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001834
Rob Clarke2c719b2014-12-15 13:56:32 -05001835 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001836 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001837}
1838
Daniel Vetter4be73782014-01-17 14:39:48 +01001839static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001840{
Paulo Zanoni30add222012-10-26 19:05:45 -02001841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001842 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001843 struct intel_digital_port *intel_dig_port =
1844 dp_to_dig_port(intel_dp);
1845 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1846 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001847 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001848 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001849
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001850 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001851
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001852 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001853
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001854 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001855 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001856
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001857 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1858 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001859
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001860 pp = ironlake_get_pp_control(intel_dp);
1861 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001862
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1864 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001865
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001866 I915_WRITE(pp_ctrl_reg, pp);
1867 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001868
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001869 /* Make sure sequencer is idle before allowing subsequent activity */
1870 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1871 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001872
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001873 if ((pp & POWER_TARGET_ON) == 0)
1874 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001875
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001876 power_domain = intel_display_port_power_domain(intel_encoder);
1877 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001878}
1879
Daniel Vetter4be73782014-01-17 14:39:48 +01001880static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001881{
1882 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1883 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001884
Ville Syrjälä773538e82014-09-04 14:54:56 +03001885 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001886 if (!intel_dp->want_panel_vdd)
1887 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001888 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001889}
1890
Imre Deakaba86892014-07-30 15:57:31 +03001891static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1892{
1893 unsigned long delay;
1894
1895 /*
1896 * Queue the timer to fire a long time from now (relative to the power
1897 * down delay) to keep the panel power up across a sequence of
1898 * operations.
1899 */
1900 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1901 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1902}
1903
Ville Syrjälä951468f2014-09-04 14:55:31 +03001904/*
1905 * Must be paired with edp_panel_vdd_on().
1906 * Must hold pps_mutex around the whole on/off sequence.
1907 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001909static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001910{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001911 struct drm_i915_private *dev_priv =
1912 intel_dp_to_dev(intel_dp)->dev_private;
1913
1914 lockdep_assert_held(&dev_priv->pps_mutex);
1915
Keith Packard97af61f572011-09-28 16:23:51 -07001916 if (!is_edp(intel_dp))
1917 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001918
Rob Clarke2c719b2014-12-15 13:56:32 -05001919 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001920 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001921
Keith Packardbd943152011-09-18 23:09:52 -07001922 intel_dp->want_panel_vdd = false;
1923
Imre Deakaba86892014-07-30 15:57:31 +03001924 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001925 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001926 else
1927 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001928}
1929
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001930static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001931{
Paulo Zanoni30add222012-10-26 19:05:45 -02001932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001933 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001934 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001935 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001936
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001937 lockdep_assert_held(&dev_priv->pps_mutex);
1938
Keith Packard97af61f572011-09-28 16:23:51 -07001939 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001940 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001941
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001942 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1943 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001944
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001945 if (WARN(edp_have_panel_power(intel_dp),
1946 "eDP port %c panel power already on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001948 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001949
Daniel Vetter4be73782014-01-17 14:39:48 +01001950 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001951
Jani Nikulabf13e812013-09-06 07:40:05 +03001952 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001953 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001954 if (IS_GEN5(dev)) {
1955 /* ILK workaround: disable reset around power sequence */
1956 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001959 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001960
Keith Packard1c0ae802011-09-19 13:59:29 -07001961 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001962 if (!IS_GEN5(dev))
1963 pp |= PANEL_POWER_RESET;
1964
Jesse Barnes453c5422013-03-28 09:55:41 -07001965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001967
Daniel Vetter4be73782014-01-17 14:39:48 +01001968 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001969 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001970
Keith Packard05ce1a42011-09-29 16:33:01 -07001971 if (IS_GEN5(dev)) {
1972 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001973 I915_WRITE(pp_ctrl_reg, pp);
1974 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001975 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001976}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001977
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001978void intel_edp_panel_on(struct intel_dp *intel_dp)
1979{
1980 if (!is_edp(intel_dp))
1981 return;
1982
1983 pps_lock(intel_dp);
1984 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001985 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001986}
1987
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001988
1989static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001990{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001991 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1992 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001993 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001994 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001995 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07001996 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001997 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001998
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001999 lockdep_assert_held(&dev_priv->pps_mutex);
2000
Keith Packard97af61f572011-09-28 16:23:51 -07002001 if (!is_edp(intel_dp))
2002 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002003
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002004 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2005 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002006
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002007 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2008 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002009
Jesse Barnes453c5422013-03-28 09:55:41 -07002010 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002011	/* We need to switch off panel power _and_ the VDD force bit; otherwise
2012	 * some panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002013 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2014 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002015
Jani Nikulabf13e812013-09-06 07:40:05 +03002016 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002017
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002018 intel_dp->want_panel_vdd = false;
2019
Jesse Barnes453c5422013-03-28 09:55:41 -07002020 I915_WRITE(pp_ctrl_reg, pp);
2021 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002022
Paulo Zanonidce56b32013-12-19 14:29:40 -02002023 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002024 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002025
2026 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002027 power_domain = intel_display_port_power_domain(intel_encoder);
2028 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002029}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002030
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002031void intel_edp_panel_off(struct intel_dp *intel_dp)
2032{
2033 if (!is_edp(intel_dp))
2034 return;
2035
2036 pps_lock(intel_dp);
2037 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002038 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002039}
2040
Jani Nikula1250d102014-08-12 17:11:39 +03002041/* Enable backlight in the panel power control. */
2042static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002043{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002044 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002046 struct drm_i915_private *dev_priv = dev->dev_private;
2047 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002048 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002049
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002050 /*
2051 * If we enable the backlight right away following a panel power
2052 * on, we may see slight flicker as the panel syncs with the eDP
2053 * link. So delay a bit to make sure the image is solid before
2054 * allowing it to appear.
2055 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002056 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002057
Ville Syrjälä773538e82014-09-04 14:54:56 +03002058 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002059
Jesse Barnes453c5422013-03-28 09:55:41 -07002060 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002061 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002062
Jani Nikulabf13e812013-09-06 07:40:05 +03002063 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002064
2065 I915_WRITE(pp_ctrl_reg, pp);
2066 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002067
Ville Syrjälä773538e82014-09-04 14:54:56 +03002068 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002069}
2070
Jani Nikula1250d102014-08-12 17:11:39 +03002071/* Enable backlight PWM and backlight PP control. */
2072void intel_edp_backlight_on(struct intel_dp *intel_dp)
2073{
2074 if (!is_edp(intel_dp))
2075 return;
2076
2077 DRM_DEBUG_KMS("\n");
2078
2079 intel_panel_enable_backlight(intel_dp->attached_connector);
2080 _intel_edp_backlight_on(intel_dp);
2081}
2082
2083/* Disable backlight in the panel power control. */
2084static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002085{
Paulo Zanoni30add222012-10-26 19:05:45 -02002086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002087 struct drm_i915_private *dev_priv = dev->dev_private;
2088 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002089 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002090
Keith Packardf01eca22011-09-28 16:48:10 -07002091 if (!is_edp(intel_dp))
2092 return;
2093
Ville Syrjälä773538e82014-09-04 14:54:56 +03002094 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002095
Jesse Barnes453c5422013-03-28 09:55:41 -07002096 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002097 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002098
Jani Nikulabf13e812013-09-06 07:40:05 +03002099 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002100
2101 I915_WRITE(pp_ctrl_reg, pp);
2102 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002103
Ville Syrjälä773538e82014-09-04 14:54:56 +03002104 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002105
Paulo Zanonidce56b32013-12-19 14:29:40 -02002106 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002107 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002108}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002109
Jani Nikula1250d102014-08-12 17:11:39 +03002110/* Disable backlight PP control and backlight PWM. */
2111void intel_edp_backlight_off(struct intel_dp *intel_dp)
2112{
2113 if (!is_edp(intel_dp))
2114 return;
2115
2116 DRM_DEBUG_KMS("\n");
2117
2118 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002119 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002120}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002121
Jani Nikula73580fb72014-08-12 17:11:41 +03002122/*
2123 * Hook for controlling the panel power control backlight through the bl_power
2124 * sysfs attribute. Take care to handle multiple calls.
2125 */
2126static void intel_edp_backlight_power(struct intel_connector *connector,
2127 bool enable)
2128{
2129 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002130 bool is_enabled;
2131
Ville Syrjälä773538e82014-09-04 14:54:56 +03002132 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002133 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002134 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002135
2136 if (is_enabled == enable)
2137 return;
2138
Jani Nikula23ba9372014-08-27 14:08:43 +03002139 DRM_DEBUG_KMS("panel power control backlight %s\n",
2140 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002141
2142 if (enable)
2143 _intel_edp_backlight_on(intel_dp);
2144 else
2145 _intel_edp_backlight_off(intel_dp);
2146}
2147
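/*
 * The CPU eDP PLL (port A) must only be toggled while the attached pipe is
 * shut down, hence the assert_pipe_disabled() checks in the two helpers
 * below.
 */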
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002148static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002149{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002150 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2151 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2152 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002153 struct drm_i915_private *dev_priv = dev->dev_private;
2154 u32 dpa_ctl;
2155
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002156 assert_pipe_disabled(dev_priv,
2157 to_intel_crtc(crtc)->pipe);
2158
Jesse Barnesd240f202010-08-13 15:43:26 -07002159 DRM_DEBUG_KMS("\n");
2160 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002161 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2162 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2163
2164 /* We don't adjust intel_dp->DP while tearing down the link, to
2165 * facilitate link retraining (e.g. after hotplug). Hence clear all
2166 * enable bits here to ensure that we don't enable too much. */
2167 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2168 intel_dp->DP |= DP_PLL_ENABLE;
2169 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002170 POSTING_READ(DP_A);
2171 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002172}
2173
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002174static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002175{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002176 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2177 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2178 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002179 struct drm_i915_private *dev_priv = dev->dev_private;
2180 u32 dpa_ctl;
2181
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002182 assert_pipe_disabled(dev_priv,
2183 to_intel_crtc(crtc)->pipe);
2184
Jesse Barnesd240f202010-08-13 15:43:26 -07002185 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002186 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2187 "dp pll off, should be on\n");
2188 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2189
2190 /* We can't rely on the value tracked for the DP register in
2191 * intel_dp->DP because link_down must not change that (otherwise link
2192	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002193 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002194 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002195 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002196 udelay(200);
2197}
2198
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002199/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002200void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002201{
2202 int ret, i;
2203
2204 /* Should have a valid DPCD by this point */
2205 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2206 return;
2207
2208 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002209 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2210 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002211 } else {
2212 /*
2213 * When turning on, we need to retry for 1ms to give the sink
2214 * time to wake up.
2215 */
2216 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002217 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2218 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002219 if (ret == 1)
2220 break;
2221 msleep(1);
2222 }
2223 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002224
2225 if (ret != 1)
2226 DRM_DEBUG_KMS("failed to %s sink power state\n",
2227 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002228}
2229
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002230static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2231 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002232{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002233 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002234 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002235 struct drm_device *dev = encoder->base.dev;
2236 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002237 enum intel_display_power_domain power_domain;
2238 u32 tmp;
2239
2240 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002241 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002242 return false;
2243
2244 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002245
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002246 if (!(tmp & DP_PORT_EN))
2247 return false;
2248
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002249 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002250 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002251 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002252 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002253
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002254 for_each_pipe(dev_priv, p) {
2255 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2256 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2257 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002258 return true;
2259 }
2260 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002261
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002262 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2263 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002264 } else if (IS_CHERRYVIEW(dev)) {
2265 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2266 } else {
2267 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002268 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002269
2270 return true;
2271}
2272
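/*
 * Read the current hardware state back into pipe_config; this readout is
 * used to cross-check the software-computed state.
 */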
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002273static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002274 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002275{
2276 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002277 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002278 struct drm_device *dev = encoder->base.dev;
2279 struct drm_i915_private *dev_priv = dev->dev_private;
2280 enum port port = dp_to_dig_port(intel_dp)->port;
2281 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002282 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002283
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002284 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002285
2286 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002287
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002288 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002289 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2290
2291 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002292 flags |= DRM_MODE_FLAG_PHSYNC;
2293 else
2294 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002295
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002296 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002297 flags |= DRM_MODE_FLAG_PVSYNC;
2298 else
2299 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002300 } else {
2301 if (tmp & DP_SYNC_HS_HIGH)
2302 flags |= DRM_MODE_FLAG_PHSYNC;
2303 else
2304 flags |= DRM_MODE_FLAG_NHSYNC;
2305
2306 if (tmp & DP_SYNC_VS_HIGH)
2307 flags |= DRM_MODE_FLAG_PVSYNC;
2308 else
2309 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002310 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002311
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002312 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002313
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002314 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2315 tmp & DP_COLOR_RANGE_16_235)
2316 pipe_config->limited_color_range = true;
2317
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002318 pipe_config->has_dp_encoder = true;
2319
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002320 pipe_config->lane_count =
2321 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2322
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002323 intel_dp_get_m_n(crtc, pipe_config);
2324
Ville Syrjälä18442d02013-09-13 16:00:08 +03002325 if (port == PORT_A) {
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002326 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2327 pipe_config->port_clock = 162000;
2328 else
2329 pipe_config->port_clock = 270000;
2330 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002331
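	/*
	 * Derive the pixel (dot) clock from the port clock and the M/N
	 * values read back above.
	 */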
2332 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2333 &pipe_config->dp_m_n);
2334
2335 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2336 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2337
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002338 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002339
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002340 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2341 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2342 /*
2343 * This is a big fat ugly hack.
2344 *
2345 * Some machines in UEFI boot mode provide us a VBT that has 18
2346 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2347 * unknown we fail to light up. Yet the same BIOS boots up with
2348 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2349 * max, not what it tells us to use.
2350 *
2351 * Note: This will still be broken if the eDP panel is not lit
2352 * up by the BIOS, and thus we can't get the mode at module
2353 * load.
2354 */
2355 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2356 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2357 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2358 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002359}
2360
Daniel Vettere8cb4552012-07-01 13:05:48 +02002361static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002362{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002363 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002364 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002365 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2366
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002367 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002368 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002369
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002370 if (HAS_PSR(dev) && !HAS_DDI(dev))
2371 intel_psr_disable(intel_dp);
2372
Daniel Vetter6cb49832012-05-20 17:14:50 +02002373 /* Make sure the panel is off before trying to change the mode. But also
2374 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002375 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002376 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002377 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002378 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002379
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002380 /* disable the port before the pipe on g4x */
2381 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002382 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002383}
2384
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002385static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002386{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002388 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002389
Ville Syrjälä49277c32014-03-31 18:21:26 +03002390 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002391 if (port == PORT_A)
2392 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002393}
2394
2395static void vlv_post_disable_dp(struct intel_encoder *encoder)
2396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2398
2399 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002400}
2401
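/*
 * Assert (@reset true) or deassert the TX lane resets and the PCS clock
 * soft reset for the first PCS group, and for the second group as well
 * when more than two lanes are in use.
 */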
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002402static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2403 bool reset)
2404{
2405 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2406 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2407 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2408 enum pipe pipe = crtc->pipe;
2409 uint32_t val;
2410
2411 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2412 if (reset)
2413 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2414 else
2415 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2416 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2417
2418 if (crtc->config->lane_count > 2) {
2419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2420 if (reset)
2421 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2422 else
2423 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2424 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2425 }
2426
2427 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2428 val |= CHV_PCS_REQ_SOFTRESET_EN;
2429 if (reset)
2430 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2431 else
2432 val |= DPIO_PCS_CLK_SOFT_RESET;
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2434
2435 if (crtc->config->lane_count > 2) {
2436 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2437 val |= CHV_PCS_REQ_SOFTRESET_EN;
2438 if (reset)
2439 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2440 else
2441 val |= DPIO_PCS_CLK_SOFT_RESET;
2442 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2443 }
2444}
2445
Ville Syrjälä580d3812014-04-09 13:29:00 +03002446static void chv_post_disable_dp(struct intel_encoder *encoder)
2447{
2448 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002449 struct drm_device *dev = encoder->base.dev;
2450 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002451
2452 intel_dp_link_down(intel_dp);
2453
Ville Syrjäläa5805162015-05-26 20:42:30 +03002454 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002455
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002456 /* Assert data lane reset */
2457 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002458
Ville Syrjäläa5805162015-05-26 20:42:30 +03002459 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002460}
2461
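/*
 * Translate the requested DPCD training pattern into the platform specific
 * bits: DDI ports program DP_TP_CTL directly, while gen7 port A and CPT PCH
 * ports use the _CPT fields in *DP and everything else uses the legacy (or
 * CHV-extended) fields. The caller is expected to write *DP to the port
 * register afterwards.
 */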
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002462static void
2463_intel_dp_set_link_train(struct intel_dp *intel_dp,
2464 uint32_t *DP,
2465 uint8_t dp_train_pat)
2466{
2467 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2468 struct drm_device *dev = intel_dig_port->base.base.dev;
2469 struct drm_i915_private *dev_priv = dev->dev_private;
2470 enum port port = intel_dig_port->port;
2471
2472 if (HAS_DDI(dev)) {
2473 uint32_t temp = I915_READ(DP_TP_CTL(port));
2474
2475 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2476 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2477 else
2478 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2479
2480 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2481 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2482 case DP_TRAINING_PATTERN_DISABLE:
2483 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2484
2485 break;
2486 case DP_TRAINING_PATTERN_1:
2487 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2488 break;
2489 case DP_TRAINING_PATTERN_2:
2490 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2491 break;
2492 case DP_TRAINING_PATTERN_3:
2493 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2494 break;
2495 }
2496 I915_WRITE(DP_TP_CTL(port), temp);
2497
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002498 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2499 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002500 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2501
2502 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2503 case DP_TRAINING_PATTERN_DISABLE:
2504 *DP |= DP_LINK_TRAIN_OFF_CPT;
2505 break;
2506 case DP_TRAINING_PATTERN_1:
2507 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2508 break;
2509 case DP_TRAINING_PATTERN_2:
2510 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2511 break;
2512 case DP_TRAINING_PATTERN_3:
2513 DRM_ERROR("DP training pattern 3 not supported\n");
2514 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2515 break;
2516 }
2517
2518 } else {
2519 if (IS_CHERRYVIEW(dev))
2520 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2521 else
2522 *DP &= ~DP_LINK_TRAIN_MASK;
2523
2524 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2525 case DP_TRAINING_PATTERN_DISABLE:
2526 *DP |= DP_LINK_TRAIN_OFF;
2527 break;
2528 case DP_TRAINING_PATTERN_1:
2529 *DP |= DP_LINK_TRAIN_PAT_1;
2530 break;
2531 case DP_TRAINING_PATTERN_2:
2532 *DP |= DP_LINK_TRAIN_PAT_2;
2533 break;
2534 case DP_TRAINING_PATTERN_3:
2535 if (IS_CHERRYVIEW(dev)) {
2536 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2537 } else {
2538 DRM_ERROR("DP training pattern 3 not supported\n");
2539 *DP |= DP_LINK_TRAIN_PAT_2;
2540 }
2541 break;
2542 }
2543 }
2544}
2545
2546static void intel_dp_enable_port(struct intel_dp *intel_dp)
2547{
2548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2549 struct drm_i915_private *dev_priv = dev->dev_private;
2550
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002551 /* enable with pattern 1 (as per spec) */
2552 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2553 DP_TRAINING_PATTERN_1);
2554
2555 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2556 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002557
2558 /*
2559 * Magic for VLV/CHV. We _must_ first set up the register
2560 * without actually enabling the port, and then do another
2561 * write to enable the port. Otherwise link training will
2562 * fail when the power sequencer is freshly used for this port.
2563 */
2564 intel_dp->DP |= DP_PORT_EN;
2565
2566 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2567 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002568}
2569
Daniel Vettere8cb4552012-07-01 13:05:48 +02002570static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002571{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002572 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2573 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002574 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002575 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002576 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002577
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002578 if (WARN_ON(dp_reg & DP_PORT_EN))
2579 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002580
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002581 pps_lock(intel_dp);
2582
2583 if (IS_VALLEYVIEW(dev))
2584 vlv_init_panel_power_sequencer(intel_dp);
2585
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002586 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002587
2588 edp_panel_vdd_on(intel_dp);
2589 edp_panel_on(intel_dp);
2590 edp_panel_vdd_off(intel_dp, true);
2591
2592 pps_unlock(intel_dp);
2593
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002594 if (IS_VALLEYVIEW(dev)) {
2595 unsigned int lane_mask = 0x0;
2596
2597 if (IS_CHERRYVIEW(dev))
2598 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2599
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002600 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2601 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002602 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002603
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002604 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2605 intel_dp_start_link_train(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002606 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002607 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002608
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002609 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002610 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2611 pipe_name(crtc->pipe));
2612 intel_audio_codec_enable(encoder);
2613 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002614}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002615
Jani Nikulaecff4f32013-09-06 07:38:29 +03002616static void g4x_enable_dp(struct intel_encoder *encoder)
2617{
Jani Nikula828f5c62013-09-05 16:44:45 +03002618 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2619
Jani Nikulaecff4f32013-09-06 07:38:29 +03002620 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002621 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002622}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002623
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002624static void vlv_enable_dp(struct intel_encoder *encoder)
2625{
Jani Nikula828f5c62013-09-05 16:44:45 +03002626 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2627
Daniel Vetter4be73782014-01-17 14:39:48 +01002628 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002629 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002630}
2631
Jani Nikulaecff4f32013-09-06 07:38:29 +03002632static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002633{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002634 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002635 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002636
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002637 intel_dp_prepare(encoder);
2638
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002639 /* Only ilk+ has port A */
2640 if (dport->port == PORT_A) {
2641 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002642 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002643 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002644}
2645
Ville Syrjälä83b84592014-10-16 21:29:51 +03002646static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2647{
2648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2649 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2650 enum pipe pipe = intel_dp->pps_pipe;
2651 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2652
2653 edp_panel_vdd_off_sync(intel_dp);
2654
2655 /*
2656	 * VLV seems to get confused when multiple power sequencers
2657	 * have the same port selected (even if only one has power/vdd
2658	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2659	 * CHV, on the other hand, doesn't seem to mind having the same port
2660	 * selected in multiple power sequencers, but let's always clear the
2661	 * port select when logically disconnecting a power sequencer
2662	 * from a port.
2663 */
2664 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2665 pipe_name(pipe), port_name(intel_dig_port->port));
2666 I915_WRITE(pp_on_reg, 0);
2667 POSTING_READ(pp_on_reg);
2668
2669 intel_dp->pps_pipe = INVALID_PIPE;
2670}
2671
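/*
 * Detach the given pipe's power sequencer from whichever eDP port currently
 * claims it, so that the caller can take it over. Warns if the port losing
 * the sequencer still has an active crtc.
 */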
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002672static void vlv_steal_power_sequencer(struct drm_device *dev,
2673 enum pipe pipe)
2674{
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_encoder *encoder;
2677
2678 lockdep_assert_held(&dev_priv->pps_mutex);
2679
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002680 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2681 return;
2682
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002683 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2684 base.head) {
2685 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002686 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002687
2688 if (encoder->type != INTEL_OUTPUT_EDP)
2689 continue;
2690
2691 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002692 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002693
2694 if (intel_dp->pps_pipe != pipe)
2695 continue;
2696
2697 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002698 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002699
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002700 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002701 "stealing pipe %c power sequencer from active eDP port %c\n",
2702 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002703
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002704 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002705 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002706 }
2707}
2708
2709static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2710{
2711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2712 struct intel_encoder *encoder = &intel_dig_port->base;
2713 struct drm_device *dev = encoder->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002716
2717 lockdep_assert_held(&dev_priv->pps_mutex);
2718
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002719 if (!is_edp(intel_dp))
2720 return;
2721
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002722 if (intel_dp->pps_pipe == crtc->pipe)
2723 return;
2724
2725 /*
2726 * If another power sequencer was being used on this
2727 * port previously make sure to turn off vdd there while
2728 * we still have control of it.
2729 */
2730 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002731 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002732
2733 /*
2734 * We may be stealing the power
2735 * sequencer from another port.
2736 */
2737 vlv_steal_power_sequencer(dev, crtc->pipe);
2738
2739 /* now it's all ours */
2740 intel_dp->pps_pipe = crtc->pipe;
2741
2742 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2743 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2744
2745 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002746 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2747 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002748}
2749
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002750static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2751{
2752 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2753 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002754 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002755 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002756 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002757 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002758 int pipe = intel_crtc->pipe;
2759 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002760
Ville Syrjäläa5805162015-05-26 20:42:30 +03002761 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002762
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002763 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002764 val = 0;
2765 if (pipe)
2766 val |= (1<<21);
2767 else
2768 val &= ~(1<<21);
2769 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002770 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2771 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2772 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002773
Ville Syrjäläa5805162015-05-26 20:42:30 +03002774 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002775
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002776 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002777}
2778
Jani Nikulaecff4f32013-09-06 07:38:29 +03002779static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002780{
2781 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2782 struct drm_device *dev = encoder->base.dev;
2783 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002784 struct intel_crtc *intel_crtc =
2785 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002786 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002787 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002788
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002789 intel_dp_prepare(encoder);
2790
Jesse Barnes89b667f2013-04-18 14:51:36 -07002791 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002792 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002793 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002794 DPIO_PCS_TX_LANE2_RESET |
2795 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002796 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002797 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2798 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2799 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2800 DPIO_PCS_CLK_SOFT_RESET);
2801
2802 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2804 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2805 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002806 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002807}
2808
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002809static void chv_pre_enable_dp(struct intel_encoder *encoder)
2810{
2811 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2812 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2813 struct drm_device *dev = encoder->base.dev;
2814 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002815 struct intel_crtc *intel_crtc =
2816 to_intel_crtc(encoder->base.crtc);
2817 enum dpio_channel ch = vlv_dport_to_channel(dport);
2818 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002819 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002820 u32 val;
2821
Ville Syrjäläa5805162015-05-26 20:42:30 +03002822 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002823
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002824 /* allow hardware to manage TX FIFO reset source */
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2826 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2828
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002829 if (intel_crtc->config->lane_count > 2) {
2830 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2831 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2833 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002834
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002835	/* Program Tx lane latency optimal setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002836 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002837 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002838 if (intel_crtc->config->lane_count == 1)
2839 data = 0x0;
2840 else
2841 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002842 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2843 data << DPIO_UPAR_SHIFT);
2844 }
2845
2846 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002847 if (intel_crtc->config->port_clock > 270000)
2848 stagger = 0x18;
2849 else if (intel_crtc->config->port_clock > 135000)
2850 stagger = 0xd;
2851 else if (intel_crtc->config->port_clock > 67500)
2852 stagger = 0x7;
2853 else if (intel_crtc->config->port_clock > 33750)
2854 stagger = 0x4;
2855 else
2856 stagger = 0x2;
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2859 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2861
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002862 if (intel_crtc->config->lane_count > 2) {
2863 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2864 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2866 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(6) |
2873 DPIO_TX2_STAGGER_MULT(0));
2874
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002875 if (intel_crtc->config->lane_count > 2) {
2876 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2877 DPIO_LANESTAGGER_STRAP(stagger) |
2878 DPIO_LANESTAGGER_STRAP_OVRD |
2879 DPIO_TX1_STAGGER_MASK(0x1f) |
2880 DPIO_TX1_STAGGER_MULT(7) |
2881 DPIO_TX2_STAGGER_MULT(5));
2882 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002883
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002884 /* Deassert data lane reset */
2885 chv_data_lane_soft_reset(encoder, false);
2886
Ville Syrjäläa5805162015-05-26 20:42:30 +03002887 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002888
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002889 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002890
2891 /* Second common lane will stay alive on its own now */
2892 if (dport->release_cl2_override) {
2893 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2894 dport->release_cl2_override = false;
2895 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002896}
2897
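/*
 * Pre-PLL setup for DP on CHV: power up the PHY lanes (keeping the second
 * common lane alive via the override when needed), hold the data lanes in
 * reset, and program the clock distribution and clock channel usage before
 * the PLL is enabled.
 */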
Ville Syrjälä9197c882014-04-09 13:29:05 +03002898static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2899{
2900 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2901 struct drm_device *dev = encoder->base.dev;
2902 struct drm_i915_private *dev_priv = dev->dev_private;
2903 struct intel_crtc *intel_crtc =
2904 to_intel_crtc(encoder->base.crtc);
2905 enum dpio_channel ch = vlv_dport_to_channel(dport);
2906 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002907 unsigned int lane_mask =
2908 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002909 u32 val;
2910
Ville Syrjälä625695f2014-06-28 02:04:02 +03002911 intel_dp_prepare(encoder);
2912
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002913 /*
2914 * Must trick the second common lane into life.
2915 * Otherwise we can't even access the PLL.
2916 */
2917 if (ch == DPIO_CH0 && pipe == PIPE_B)
2918 dport->release_cl2_override =
2919 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2920
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002921 chv_phy_powergate_lanes(encoder, true, lane_mask);
2922
Ville Syrjäläa5805162015-05-26 20:42:30 +03002923 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002924
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002925 /* Assert data lane reset */
2926 chv_data_lane_soft_reset(encoder, true);
2927
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002928 /* program left/right clock distribution */
2929 if (pipe != PIPE_B) {
2930 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2931 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2932 if (ch == DPIO_CH0)
2933 val |= CHV_BUFLEFTENA1_FORCE;
2934 if (ch == DPIO_CH1)
2935 val |= CHV_BUFRIGHTENA1_FORCE;
2936 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2937 } else {
2938 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2939 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2940 if (ch == DPIO_CH0)
2941 val |= CHV_BUFLEFTENA2_FORCE;
2942 if (ch == DPIO_CH1)
2943 val |= CHV_BUFRIGHTENA2_FORCE;
2944 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2945 }
2946
Ville Syrjälä9197c882014-04-09 13:29:05 +03002947 /* program clock channel usage */
2948 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2949 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2950 if (pipe != PIPE_B)
2951 val &= ~CHV_PCS_USEDCLKCHANNEL;
2952 else
2953 val |= CHV_PCS_USEDCLKCHANNEL;
2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2955
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002956 if (intel_crtc->config->lane_count > 2) {
2957 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2958 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2959 if (pipe != PIPE_B)
2960 val &= ~CHV_PCS_USEDCLKCHANNEL;
2961 else
2962 val |= CHV_PCS_USEDCLKCHANNEL;
2963 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2964 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03002965
2966 /*
2967	 * This is a bit weird since generally CL
2968 * matches the pipe, but here we need to
2969 * pick the CL based on the port.
2970 */
2971 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2972 if (pipe != PIPE_B)
2973 val &= ~CHV_CMN_USEDCLKCHANNEL;
2974 else
2975 val |= CHV_CMN_USEDCLKCHANNEL;
2976 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2977
Ville Syrjäläa5805162015-05-26 20:42:30 +03002978 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002979}
2980
Ville Syrjäläd6db9952015-07-08 23:45:49 +03002981static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2982{
2983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2984 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2985 u32 val;
2986
2987 mutex_lock(&dev_priv->sb_lock);
2988
2989 /* disable left/right clock distribution */
2990 if (pipe != PIPE_B) {
2991 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2992 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2993 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2994 } else {
2995 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2996 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2997 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2998 }
2999
3000 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003001
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003002 /*
3003 * Leave the power down bit cleared for at least one
3004	 * lane so that chv_phy_powergate_ch() will power
3005	 * on something when the channel is otherwise unused.
3006	 * When the port is off and the override is removed
3007	 * the lanes power down anyway, so it doesn't really
3008	 * matter what the state of the power down bits is
3009	 * after this.
3010 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003011 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003012}
3013
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003014/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003015 * Native read with retry for link status and receiver capability reads for
3016 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003017 *
3018 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3019 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003020 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003021static ssize_t
3022intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3023 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003024{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003025 ssize_t ret;
3026 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003027
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003028 /*
3029 * Sometime we just get the same incorrect byte repeated
3030 * over the entire buffer. Doing just one throw away read
3031 * initially seems to "solve" it.
3032 */
3033 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3034
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003035 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003036 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3037 if (ret == size)
3038 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003039 msleep(1);
3040 }
3041
Jani Nikula9d1a1032014-03-14 16:51:15 +02003042 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003043}
3044
3045/*
3046 * Fetch AUX CH registers 0x202 - 0x207 which contain
3047 * link status information
3048 */
3049static bool
Keith Packard93f62da2011-11-01 19:45:03 -07003050intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003051{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003052 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3053 DP_LANE0_1_STATUS,
3054 link_status,
3055 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003056}
3057
Paulo Zanoni11002442014-06-13 18:45:41 -03003058/* These are source-specific values. */
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003059static uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003060intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003061{
Paulo Zanoni30add222012-10-26 19:05:45 -02003062 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303063 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003064 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003065
Vandana Kannan93147262014-11-18 15:45:29 +05303066 if (IS_BROXTON(dev))
3067 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3068 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303069 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303070 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003071 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303072 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303073 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003074 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303075 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003076 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303077 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003078 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003080}
3081
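/*
 * Maximum pre-emphasis level the source supports for the given voltage
 * swing, again in DPCD training encoding and per platform.
 */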
3082static uint8_t
3083intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3084{
Paulo Zanoni30add222012-10-26 19:05:45 -02003085 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003086 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003087
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003088 if (INTEL_INFO(dev)->gen >= 9) {
3089 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3091 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3093 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3094 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3095 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3097 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003098 default:
3099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3100 }
3101 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003102 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3104 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3106 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3108 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003110 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303111 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003112 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003113 } else if (IS_VALLEYVIEW(dev)) {
3114 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3118 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003122 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303123 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003124 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003125 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003126 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003132 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003134 }
3135 } else {
3136 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003144 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303145 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003146 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003147 }
3148}
3149
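/*
 * Translate the voltage swing / pre-emphasis request in
 * intel_dp->train_set[0] into VLV DPIO PHY register values and program them
 * through the sideband.
 */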
Daniel Vetter5829975c2015-04-16 11:36:52 +02003150static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003151{
3152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3153 struct drm_i915_private *dev_priv = dev->dev_private;
3154 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003155 struct intel_crtc *intel_crtc =
3156 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003157 unsigned long demph_reg_value, preemph_reg_value,
3158 uniqtranscale_reg_value;
3159 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003160 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003161 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003162
3163 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303164 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003165 preemph_reg_value = 0x0004000;
3166 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003168 demph_reg_value = 0x2B405555;
3169 uniqtranscale_reg_value = 0x552AB83A;
3170 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003172 demph_reg_value = 0x2B404040;
3173 uniqtranscale_reg_value = 0x5548B83A;
3174 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003176 demph_reg_value = 0x2B245555;
3177 uniqtranscale_reg_value = 0x5560B83A;
3178 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003180 demph_reg_value = 0x2B405555;
3181 uniqtranscale_reg_value = 0x5598DA3A;
3182 break;
3183 default:
3184 return 0;
3185 }
3186 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303187 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003188 preemph_reg_value = 0x0002000;
3189 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003191 demph_reg_value = 0x2B404040;
3192 uniqtranscale_reg_value = 0x5552B83A;
3193 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003195 demph_reg_value = 0x2B404848;
3196 uniqtranscale_reg_value = 0x5580B83A;
3197 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003199 demph_reg_value = 0x2B404040;
3200 uniqtranscale_reg_value = 0x55ADDA3A;
3201 break;
3202 default:
3203 return 0;
3204 }
3205 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303206 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003207 preemph_reg_value = 0x0000000;
3208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003210 demph_reg_value = 0x2B305555;
3211 uniqtranscale_reg_value = 0x5570B83A;
3212 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003214 demph_reg_value = 0x2B2B4040;
3215 uniqtranscale_reg_value = 0x55ADDA3A;
3216 break;
3217 default:
3218 return 0;
3219 }
3220 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303221 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003222 preemph_reg_value = 0x0006000;
3223 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003225 demph_reg_value = 0x1B405555;
3226 uniqtranscale_reg_value = 0x55ADDA3A;
3227 break;
3228 default:
3229 return 0;
3230 }
3231 break;
3232 default:
3233 return 0;
3234 }
3235
Ville Syrjäläa5805162015-05-26 20:42:30 +03003236 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003237 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3238 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3239 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003240 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003241 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3242 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3243 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3244 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003245 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003246
3247 return 0;
3248}
3249
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003250static bool chv_need_uniq_trans_scale(uint8_t train_set)
3251{
3252 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3253 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3254}
3255
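/*
 * Same idea as above, but for the CHV PHY: pick de-emphasis and margin
 * values from the requested swing/pre-emphasis and program the per-lane
 * DPIO registers, including the unique transition scale handling.
 */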
Daniel Vetter5829975c2015-04-16 11:36:52 +02003256static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003257{
3258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3261 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003262 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003263 uint8_t train_set = intel_dp->train_set[0];
3264 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003265 enum pipe pipe = intel_crtc->pipe;
3266 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003267
3268 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303269 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003272 deemph_reg_value = 128;
3273 margin_reg_value = 52;
3274 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003276 deemph_reg_value = 128;
3277 margin_reg_value = 77;
3278 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003280 deemph_reg_value = 128;
3281 margin_reg_value = 102;
3282 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003284 deemph_reg_value = 128;
3285 margin_reg_value = 154;
3286 /* FIXME extra to set for 1200 */
3287 break;
3288 default:
3289 return 0;
3290 }
3291 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303292 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003293 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003295 deemph_reg_value = 85;
3296 margin_reg_value = 78;
3297 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003299 deemph_reg_value = 85;
3300 margin_reg_value = 116;
3301 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003303 deemph_reg_value = 85;
3304 margin_reg_value = 154;
3305 break;
3306 default:
3307 return 0;
3308 }
3309 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303310 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003311 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003313 deemph_reg_value = 64;
3314 margin_reg_value = 104;
3315 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003317 deemph_reg_value = 64;
3318 margin_reg_value = 154;
3319 break;
3320 default:
3321 return 0;
3322 }
3323 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303324 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003325 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003327 deemph_reg_value = 43;
3328 margin_reg_value = 154;
3329 break;
3330 default:
3331 return 0;
3332 }
3333 break;
3334 default:
3335 return 0;
3336 }
3337
Ville Syrjäläa5805162015-05-26 20:42:30 +03003338 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003339
3340 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003341 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3342 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003343 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3344 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3346
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003347 if (intel_crtc->config->lane_count > 2) {
3348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3349 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3350 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3351 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3352 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3353 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003354
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3356 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3357 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3358 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3359
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003360 if (intel_crtc->config->lane_count > 2) {
3361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3362 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3363 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3365 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003366
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003367 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003368 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003369 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3370 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3371 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3372 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3373 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003374
3375 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003376 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003377 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003378
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003379 val &= ~DPIO_SWING_MARGIN000_MASK;
3380 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003381
3382 /*
3383 * Supposedly this value shouldn't matter when unique transition
3384 * scale is disabled, but in fact it does matter. Let's just
3385 * always program the same value and hope it's OK.
3386 */
3387 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3388 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3389
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003390 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3391 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003392
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003393 /*
3394 * The document said it needs to set bit 27 for ch0 and bit 26
3395 * for ch1. Might be a typo in the doc.
3396 * For now, for this unique transition scale selection, set bit
3397 * 27 for ch0 and ch1.
3398 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003399 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003400 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003401 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003402 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003403 else
3404 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3405 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003406 }
3407
3408 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3410 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3411 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3412
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003413 if (intel_crtc->config->lane_count > 2) {
3414 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3415 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3417 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003418
3419 /* LRC Bypass */
3420 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3421 val |= DPIO_LRC_BYPASS;
3422 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3423
Ville Syrjäläa5805162015-05-26 20:42:30 +03003424 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003425
3426 return 0;
3427}
3428
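/*
 * Take the highest voltage swing and pre-emphasis requested by the sink
 * across all lanes, clamp them to what the source supports, and store the
 * result in intel_dp->train_set.
 */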
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003429static void
Jani Nikula0301b3a2013-10-15 09:36:08 +03003430intel_get_adjust_train(struct intel_dp *intel_dp,
3431 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003432{
3433 uint8_t v = 0;
3434 uint8_t p = 0;
3435 int lane;
Keith Packard1a2eb462011-11-16 16:26:07 -08003436 uint8_t voltage_max;
3437 uint8_t preemph_max;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003438
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003439 for (lane = 0; lane < intel_dp->lane_count; lane++) {
Daniel Vetter0f037bd2012-10-18 10:15:27 +02003440 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3441 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003442
3443 if (this_v > v)
3444 v = this_v;
3445 if (this_p > p)
3446 p = this_p;
3447 }
3448
Keith Packard1a2eb462011-11-16 16:26:07 -08003449 voltage_max = intel_dp_voltage_max(intel_dp);
Keith Packard417e8222011-11-01 19:54:11 -07003450 if (v >= voltage_max)
3451 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003452
Keith Packard1a2eb462011-11-16 16:26:07 -08003453 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3454 if (p >= preemph_max)
3455 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003456
3457 for (lane = 0; lane < 4; lane++)
Jesse Barnes33a34e42010-09-08 12:42:02 -07003458 intel_dp->train_set[lane] = v | p;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003459}
3460
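/* Gen4's DP voltage swing and pre-emphasis control */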
3461static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003462gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003463{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003464 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003465
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003466 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303467 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003468 default:
3469 signal_levels |= DP_VOLTAGE_0_4;
3470 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003472 signal_levels |= DP_VOLTAGE_0_6;
3473 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003475 signal_levels |= DP_VOLTAGE_0_8;
3476 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003478 signal_levels |= DP_VOLTAGE_1_2;
3479 break;
3480 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003481 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303482 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003483 default:
3484 signal_levels |= DP_PRE_EMPHASIS_0;
3485 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303486 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003487 signal_levels |= DP_PRE_EMPHASIS_3_5;
3488 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303489 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003490 signal_levels |= DP_PRE_EMPHASIS_6;
3491 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303492 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003493 signal_levels |= DP_PRE_EMPHASIS_9_5;
3494 break;
3495 }
3496 return signal_levels;
3497}
3498
Zhenyu Wange3421a12010-04-08 09:43:27 +08003499/* Gen6's DP voltage swing and pre-emphasis control */
3500static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003501gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003502{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003503 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3504 DP_TRAIN_PRE_EMPHASIS_MASK);
3505 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3507 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003508 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303509 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003510 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3512 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003513 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303514 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3515 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003516 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303517 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3518 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003519 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003520 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003521		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
 3522			      " 0x%x\n", signal_levels);
3523 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003524 }
3525}
3526
Keith Packard1a2eb462011-11-16 16:26:07 -08003527/* Gen7's DP voltage swing and pre-emphasis control */
3528static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003529gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003530{
3531 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3532 DP_TRAIN_PRE_EMPHASIS_MASK);
3533 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003535 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003537 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003539 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3540
Sonika Jindalbd600182014-08-08 16:23:41 +05303541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003542 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303543 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003544 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3545
Sonika Jindalbd600182014-08-08 16:23:41 +05303546 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003547 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303548 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003549 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3550
3551 default:
 3552		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
 3553			      " 0x%x\n", signal_levels);
3554 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3555 }
3556}
3557
Paulo Zanonif0a34242012-12-06 16:51:50 -02003558/* Properly updates "DP" with the correct signal levels. */
3559static void
3560intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3561{
3562 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003563 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003564 struct drm_device *dev = intel_dig_port->base.base.dev;
David Weinehallf8896f52015-06-25 11:11:03 +03003565 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003566 uint8_t train_set = intel_dp->train_set[0];
3567
David Weinehallf8896f52015-06-25 11:11:03 +03003568 if (HAS_DDI(dev)) {
3569 signal_levels = ddi_signal_levels(intel_dp);
3570
3571 if (IS_BROXTON(dev))
3572 signal_levels = 0;
3573 else
3574 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003575 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003576 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003577 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003578 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003579 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003580 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003581 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003582 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003583 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003584 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3585 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003586 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003587 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3588 }
3589
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303590 if (mask)
3591 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3592
3593 DRM_DEBUG_KMS("Using vswing level %d\n",
3594 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3595 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3596 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3597 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003598
3599 *DP = (*DP & ~mask) | signal_levels;
3600}
3601
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003602static bool
Chris Wilsonea5b2132010-08-04 13:50:23 +01003603intel_dp_set_link_train(struct intel_dp *intel_dp,
Jani Nikula70aff662013-09-27 15:10:44 +03003604 uint32_t *DP,
Chris Wilson58e10eb2010-10-03 10:56:11 +01003605 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003606{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003607 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003608 struct drm_i915_private *dev_priv =
3609 to_i915(intel_dig_port->base.base.dev);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003610 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3611 int ret, len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003612
Ville Syrjälä7b13b582014-08-18 22:16:08 +03003613 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003614
Jani Nikula70aff662013-09-27 15:10:44 +03003615 I915_WRITE(intel_dp->output_reg, *DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003616 POSTING_READ(intel_dp->output_reg);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003617
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003618 buf[0] = dp_train_pat;
3619 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003620 DP_TRAINING_PATTERN_DISABLE) {
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003621 /* don't write DP_TRAINING_LANEx_SET on disable */
3622 len = 1;
3623 } else {
 3624		/* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003625 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3626 len = intel_dp->lane_count + 1;
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003627 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003628
Jani Nikula9d1a1032014-03-14 16:51:15 +02003629 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3630 buf, len);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003631
3632 return ret == len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003633}
3634
Jani Nikula70aff662013-09-27 15:10:44 +03003635static bool
3636intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3637 uint8_t dp_train_pat)
3638{
Mika Kahola4e96c972015-04-29 09:17:39 +03003639 if (!intel_dp->train_set_valid)
3640 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
Jani Nikula70aff662013-09-27 15:10:44 +03003641 intel_dp_set_signal_levels(intel_dp, DP);
3642 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3643}
3644
3645static bool
3646intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
Jani Nikula0301b3a2013-10-15 09:36:08 +03003647 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Jani Nikula70aff662013-09-27 15:10:44 +03003648{
3649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003650 struct drm_i915_private *dev_priv =
3651 to_i915(intel_dig_port->base.base.dev);
Jani Nikula70aff662013-09-27 15:10:44 +03003652 int ret;
3653
3654 intel_get_adjust_train(intel_dp, link_status);
3655 intel_dp_set_signal_levels(intel_dp, DP);
3656
3657 I915_WRITE(intel_dp->output_reg, *DP);
3658 POSTING_READ(intel_dp->output_reg);
3659
Jani Nikula9d1a1032014-03-14 16:51:15 +02003660 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003661 intel_dp->train_set, intel_dp->lane_count);
Jani Nikula70aff662013-09-27 15:10:44 +03003662
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003663 return ret == intel_dp->lane_count;
Jani Nikula70aff662013-09-27 15:10:44 +03003664}
3665
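/*
 * Put a DDI port into idle pattern transmission; this is a no-op on non-DDI
 * platforms. The wait for DP_TP_STATUS_IDLE_DONE is skipped on PORT_A, see
 * the comment below.
 */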
Imre Deak3ab9c632013-05-03 12:57:41 +03003666static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3667{
3668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3669 struct drm_device *dev = intel_dig_port->base.base.dev;
3670 struct drm_i915_private *dev_priv = dev->dev_private;
3671 enum port port = intel_dig_port->port;
3672 uint32_t val;
3673
3674 if (!HAS_DDI(dev))
3675 return;
3676
3677 val = I915_READ(DP_TP_CTL(port));
3678 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3679 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3680 I915_WRITE(DP_TP_CTL(port), val);
3681
3682 /*
 3683	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3684 * we need to set idle transmission mode is to work around a HW issue
3685 * where we enable the pipe while not in idle link-training mode.
 3686	 * In this case there is a requirement to wait for a minimum number of
3687 * idle patterns to be sent.
3688 */
3689 if (port == PORT_A)
3690 return;
3691
3692 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3693 1))
3694 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3695}
3696
Jesse Barnes33a34e42010-09-08 12:42:02 -07003697/* Enable corresponding port and start training pattern 1 */
Paulo Zanonic19b0662012-10-15 15:51:41 -03003698void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003699intel_dp_start_link_train(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003700{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003701 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
Paulo Zanonic19b0662012-10-15 15:51:41 -03003702 struct drm_device *dev = encoder->dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003703 int i;
3704 uint8_t voltage;
Keith Packardcdb0e952011-11-01 20:00:06 -07003705 int voltage_tries, loop_tries;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003706 uint32_t DP = intel_dp->DP;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003707 uint8_t link_config[2];
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003708 uint8_t link_bw, rate_select;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003709
Paulo Zanoniaffa9352012-11-23 15:30:39 -02003710 if (HAS_DDI(dev))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003711 intel_ddi_prepare_link_retrain(encoder);
3712
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003713 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003714 &link_bw, &rate_select);
3715
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003716 /* Write the link configuration data */
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003717 link_config[0] = link_bw;
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003718 link_config[1] = intel_dp->lane_count;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3720 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003721 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003722 if (intel_dp->num_sink_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05303723 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003724 &rate_select, 1);
Jani Nikula6aba5b62013-10-04 15:08:10 +03003725
3726 link_config[0] = 0;
3727 link_config[1] = DP_SET_ANSI_8B10B;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003728 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003729
3730 DP |= DP_PORT_EN;
Keith Packard1a2eb462011-11-16 16:26:07 -08003731
Jani Nikula70aff662013-09-27 15:10:44 +03003732 /* clock recovery */
3733 if (!intel_dp_reset_link_train(intel_dp, &DP,
3734 DP_TRAINING_PATTERN_1 |
3735 DP_LINK_SCRAMBLING_DISABLE)) {
3736 DRM_ERROR("failed to enable link training\n");
3737 return;
3738 }
3739
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003740 voltage = 0xff;
Keith Packardcdb0e952011-11-01 20:00:06 -07003741 voltage_tries = 0;
3742 loop_tries = 0;
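	/*
	 * Clock recovery loop: keep adjusting voltage swing and pre-emphasis
	 * as requested by the sink until every lane reports CR done. Give up
	 * after 5 attempts at the same voltage, or after 5 full restarts once
	 * all lanes have hit maximum swing.
	 */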
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003743 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003744 uint8_t link_status[DP_LINK_STATUS_SIZE];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003745
Daniel Vettera7c96552012-10-18 10:15:30 +02003746 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
Keith Packard93f62da2011-11-01 19:45:03 -07003747 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3748 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003749 break;
Keith Packard93f62da2011-11-01 19:45:03 -07003750 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003751
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003752 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Keith Packard93f62da2011-11-01 19:45:03 -07003753 DRM_DEBUG_KMS("clock recovery OK\n");
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003754 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003755 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003756
Mika Kahola4e96c972015-04-29 09:17:39 +03003757 /*
3758 * if we used previously trained voltage and pre-emphasis values
3759 * and we don't get clock recovery, reset link training values
3760 */
3761 if (intel_dp->train_set_valid) {
 3762			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3763 /* clear the flag as we are not reusing train set */
3764 intel_dp->train_set_valid = false;
3765 if (!intel_dp_reset_link_train(intel_dp, &DP,
3766 DP_TRAINING_PATTERN_1 |
3767 DP_LINK_SCRAMBLING_DISABLE)) {
3768 DRM_ERROR("failed to enable link training\n");
3769 return;
3770 }
3771 continue;
3772 }
3773
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003774 /* Check to see if we've tried the max voltage */
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003775 for (i = 0; i < intel_dp->lane_count; i++)
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003776 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3777 break;
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003778 if (i == intel_dp->lane_count) {
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003779 ++loop_tries;
3780 if (loop_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003781 DRM_ERROR("too many full retries, give up\n");
Keith Packardcdb0e952011-11-01 20:00:06 -07003782 break;
3783 }
Jani Nikula70aff662013-09-27 15:10:44 +03003784 intel_dp_reset_link_train(intel_dp, &DP,
3785 DP_TRAINING_PATTERN_1 |
3786 DP_LINK_SCRAMBLING_DISABLE);
Keith Packardcdb0e952011-11-01 20:00:06 -07003787 voltage_tries = 0;
3788 continue;
3789 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003790
3791 /* Check to see if we've tried the same voltage 5 times */
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003792 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
Chris Wilson24773672012-09-26 16:48:30 +01003793 ++voltage_tries;
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003794 if (voltage_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003795 DRM_ERROR("too many voltage retries, give up\n");
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003796 break;
3797 }
3798 } else
3799 voltage_tries = 0;
3800 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003801
Jani Nikula70aff662013-09-27 15:10:44 +03003802 /* Update training set as requested by target */
3803 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3804 DRM_ERROR("failed to update link training\n");
3805 break;
3806 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003807 }
3808
Jesse Barnes33a34e42010-09-08 12:42:02 -07003809 intel_dp->DP = DP;
3810}
3811
Paulo Zanonic19b0662012-10-15 15:51:41 -03003812void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003813intel_dp_complete_link_train(struct intel_dp *intel_dp)
3814{
Jani Nikulabc5133d2015-09-03 11:16:07 +03003815 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3816 struct drm_device *dev = dig_port->base.base.dev;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003817 bool channel_eq = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003818 int tries, cr_tries;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003819 uint32_t DP = intel_dp->DP;
Todd Previte06ea66b2014-01-20 10:19:39 -07003820 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3821
Jani Nikulabc5133d2015-09-03 11:16:07 +03003822 /*
 3823	 * Training Pattern 3 for HBR2 or DP 1.2 devices that support it.
3824 *
3825 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3826 * also mandatory for downstream devices that support HBR2.
3827 *
 3828	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
3829 * supported but still not enabled.
3830 */
Jani Nikula1da7d712015-09-03 11:16:08 +03003831 if (intel_dp_source_supports_hbr2(dev) &&
3832 drm_dp_tps3_supported(intel_dp->dpcd))
Todd Previte06ea66b2014-01-20 10:19:39 -07003833 training_pattern = DP_TRAINING_PATTERN_3;
Jani Nikula1da7d712015-09-03 11:16:08 +03003834 else if (intel_dp->link_rate == 540000)
3835 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
Jesse Barnes33a34e42010-09-08 12:42:02 -07003836
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003837 /* channel equalization */
Jani Nikula70aff662013-09-27 15:10:44 +03003838 if (!intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003839 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003840 DP_LINK_SCRAMBLING_DISABLE)) {
3841 DRM_ERROR("failed to start channel equalization\n");
3842 return;
3843 }
3844
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003845 tries = 0;
Jesse Barnes37f80972011-01-05 14:45:24 -08003846 cr_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003847 channel_eq = false;
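	/*
	 * Channel equalization loop: retry EQ up to 5 times, fall back to a
	 * full clock recovery restart if EQ keeps failing or CR is lost, and
	 * abort after 5 such restarts.
	 */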
3848 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003849 uint8_t link_status[DP_LINK_STATUS_SIZE];
Zhenyu Wange3421a12010-04-08 09:43:27 +08003850
Jesse Barnes37f80972011-01-05 14:45:24 -08003851 if (cr_tries > 5) {
3852 DRM_ERROR("failed to train DP, aborting\n");
Jesse Barnes37f80972011-01-05 14:45:24 -08003853 break;
3854 }
3855
Daniel Vettera7c96552012-10-18 10:15:30 +02003856 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
Jani Nikula70aff662013-09-27 15:10:44 +03003857 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3858 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003859 break;
Jani Nikula70aff662013-09-27 15:10:44 +03003860 }
Jesse Barnes869184a2010-10-07 16:01:22 -07003861
Jesse Barnes37f80972011-01-05 14:45:24 -08003862 /* Make sure clock is still ok */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003863 if (!drm_dp_clock_recovery_ok(link_status,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003864 intel_dp->lane_count)) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003865 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003866 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003867 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003868 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003869 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003870 cr_tries++;
3871 continue;
3872 }
3873
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003874 if (drm_dp_channel_eq_ok(link_status,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003875 intel_dp->lane_count)) {
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003876 channel_eq = true;
3877 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003878 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003879
Jesse Barnes37f80972011-01-05 14:45:24 -08003880 /* Try 5 times, then try clock recovery if that fails */
3881 if (tries > 5) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003882 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003883 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003884 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003885 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003886 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003887 tries = 0;
3888 cr_tries++;
3889 continue;
3890 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003891
Jani Nikula70aff662013-09-27 15:10:44 +03003892 /* Update training set as requested by target */
3893 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3894 DRM_ERROR("failed to update link training\n");
3895 break;
3896 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003897 ++tries;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003898 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003899
Imre Deak3ab9c632013-05-03 12:57:41 +03003900 intel_dp_set_idle_link_train(intel_dp);
3901
3902 intel_dp->DP = DP;
3903
Mika Kahola4e96c972015-04-29 09:17:39 +03003904 if (channel_eq) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03003905 intel_dp->train_set_valid = true;
Masanari Iida07f42252013-03-20 11:00:34 +09003906 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
Mika Kahola4e96c972015-04-29 09:17:39 +03003907 }
Imre Deak3ab9c632013-05-03 12:57:41 +03003908}
3909
3910void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3911{
Jani Nikula70aff662013-09-27 15:10:44 +03003912 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
Imre Deak3ab9c632013-05-03 12:57:41 +03003913 DP_TRAINING_PATTERN_DISABLE);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003914}
3915
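/*
 * Turn the DP port off: drop to the idle training pattern, clear the port
 * enable bit, apply the IBX transcoder-A workaround where needed, and then
 * wait out the panel power down delay.
 */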
3916static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003917intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003918{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003919 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003920 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003921 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003922 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003923 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003924 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003925
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003926 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003927 return;
3928
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003929 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003930 return;
3931
Zhao Yakui28c97732009-10-09 11:39:41 +08003932 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003933
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003934 if ((IS_GEN7(dev) && port == PORT_A) ||
3935 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003936 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003937 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003938 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003939 if (IS_CHERRYVIEW(dev))
3940 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3941 else
3942 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003943 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003944 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003945 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003946 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003947
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003948 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3949 I915_WRITE(intel_dp->output_reg, DP);
3950 POSTING_READ(intel_dp->output_reg);
3951
3952 /*
3953 * HW workaround for IBX, we need to move the port
3954 * to transcoder A after disabling it to allow the
3955 * matching HDMI port to be enabled on transcoder A.
3956 */
3957 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3958 /* always enable with pattern 1 (as per spec) */
3959 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3960 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3961 I915_WRITE(intel_dp->output_reg, DP);
3962 POSTING_READ(intel_dp->output_reg);
3963
3964 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003965 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003966 POSTING_READ(intel_dp->output_reg);
Eric Anholt5bddd172010-11-18 09:32:59 +08003967 }
3968
Keith Packardf01eca22011-09-28 16:48:10 -07003969 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003970}
3971
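/*
 * Read and cache the sink DPCD, including PSR capabilities and any eDP 1.4
 * supported link rates, and fetch the downstream port info when present.
 */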
Keith Packard26d61aa2011-07-25 20:01:09 -07003972static bool
3973intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003974{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003975 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3976 struct drm_device *dev = dig_port->base.base.dev;
3977 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303978 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003979
Jani Nikula9d1a1032014-03-14 16:51:15 +02003980 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3981 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003982 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003983
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003984 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003985
Adam Jacksonedb39242012-09-18 10:58:49 -04003986 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3987 return false; /* DPCD not present */
3988
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003989 /* Check if the panel supports PSR */
3990 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003991 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003992 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3993 intel_dp->psr_dpcd,
3994 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003995 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3996 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003997 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003998 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303999
4000 if (INTEL_INFO(dev)->gen >= 9 &&
4001 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4002 uint8_t frame_sync_cap;
4003
4004 dev_priv->psr.sink_support = true;
4005 intel_dp_dpcd_read_wake(&intel_dp->aux,
4006 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4007 &frame_sync_cap, 1);
4008 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4009 /* PSR2 needs frame sync as well */
4010 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
 4011			DRM_DEBUG_KMS("PSR2 %s on sink\n",
4012 dev_priv->psr.psr2_support ? "supported" : "not supported");
4013 }
Jani Nikula50003932013-09-20 16:42:17 +03004014 }
4015
Jani Nikulabc5133d2015-09-03 11:16:07 +03004016 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03004017 yesno(intel_dp_source_supports_hbr2(dev)),
4018 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07004019
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05304020 /* Intermediate frequency support */
4021 if (is_edp(intel_dp) &&
4022 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4023 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
 4024	    (rev >= 0x03)) { /* eDP v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02004025 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02004026 int i;
4027
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05304028 intel_dp_dpcd_read_wake(&intel_dp->aux,
4029 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02004030 sink_rates,
4031 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02004032
Ville Syrjälä94ca7192015-03-13 19:40:31 +02004033 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4034 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02004035
4036 if (val == 0)
4037 break;
4038
Sonika Jindalaf77b972015-05-07 13:59:28 +05304039			/* DPCD link rates are in 200 kHz units; convert to the deca-kHz units used for the drm clock */
4040 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02004041 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02004042 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05304043 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02004044
4045 intel_dp_print_rates(intel_dp);
4046
Adam Jacksonedb39242012-09-18 10:58:49 -04004047 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4048 DP_DWN_STRM_PORT_PRESENT))
4049 return true; /* native DP sink */
4050
4051 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4052 return true; /* no per-port downstream info */
4053
Jani Nikula9d1a1032014-03-14 16:51:15 +02004054 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4055 intel_dp->downstream_ports,
4056 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04004057 return false; /* downstream port status fetch failed */
4058
4059 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07004060}
4061
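/* Log the sink and branch device OUIs for debugging, when the sink advertises OUI support. */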
Adam Jackson0d198322012-05-14 16:05:47 -04004062static void
4063intel_dp_probe_oui(struct intel_dp *intel_dp)
4064{
4065 u8 buf[3];
4066
4067 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4068 return;
4069
Jani Nikula9d1a1032014-03-14 16:51:15 +02004070 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04004071 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4072 buf[0], buf[1], buf[2]);
4073
Jani Nikula9d1a1032014-03-14 16:51:15 +02004074 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04004075 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4076 buf[0], buf[1], buf[2]);
4077}
4078
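/*
 * If the source allows MST, check whether the sink is MST capable (DPCD 1.2+
 * with DP_MST_CAP set) and enable or disable the topology manager accordingly.
 */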
Dave Airlie0e32b392014-05-02 14:02:48 +10004079static bool
4080intel_dp_probe_mst(struct intel_dp *intel_dp)
4081{
4082 u8 buf[1];
4083
4084 if (!intel_dp->can_mst)
4085 return false;
4086
4087 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4088 return false;
4089
Dave Airlie0e32b392014-05-02 14:02:48 +10004090	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4091 if (buf[0] & DP_MST_CAP) {
4092 DRM_DEBUG_KMS("Sink is MST capable\n");
4093 intel_dp->is_mst = true;
4094 } else {
4095 DRM_DEBUG_KMS("Sink is not MST capable\n");
4096 intel_dp->is_mst = false;
4097 }
4098 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004099
4100 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4101 return intel_dp->is_mst;
4102}
4103
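/* Ask the sink to stop computing test CRCs and re-enable IPS on the pipe. */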
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004104static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004105{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004106 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4107 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004108 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004109 int ret = 0;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004110
4111 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004112 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004113 ret = -EIO;
4114 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004115 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004116
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004117 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004118 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004119 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004120 ret = -EIO;
4121 goto out;
4122 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004123
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004124 intel_dp->sink_crc.started = false;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004125 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004126 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004127 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004128}
4129
4130static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4131{
4132 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4133 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4134 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004135 int ret;
4136
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004137 if (intel_dp->sink_crc.started) {
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004138 ret = intel_dp_sink_crc_stop(intel_dp);
4139 if (ret)
4140 return ret;
4141 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004142
4143 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4144 return -EIO;
4145
4146 if (!(buf & DP_TEST_CRC_SUPPORTED))
4147 return -ENOTTY;
4148
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004149 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4150
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004151 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4152 return -EIO;
4153
4154 hsw_disable_ips(intel_crtc);
4155
4156 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4157 buf | DP_TEST_SINK_START) < 0) {
4158 hsw_enable_ips(intel_crtc);
4159 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004160 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004161
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004162 intel_dp->sink_crc.started = true;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004163 return 0;
4164}
4165
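/*
 * Read a 6-byte test CRC from the sink, retrying for up to 6 vblanks until the
 * sink reports a CRC that is not a stale repeat of the previous read.
 */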
4166int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4167{
4168 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4169 struct drm_device *dev = dig_port->base.base.dev;
4170 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4171 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004172 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004173 int attempts = 6;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004174 bool old_equal_new;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004175
4176 ret = intel_dp_sink_crc_start(intel_dp);
4177 if (ret)
4178 return ret;
4179
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004180 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004181 intel_wait_for_vblank(dev, intel_crtc->pipe);
4182
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004183 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004184 DP_TEST_SINK_MISC, &buf) < 0) {
4185 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004186 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004187 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004188 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004189
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004190 /*
4191 * Count might be reset during the loop. In this case
4192 * last known count needs to be reset as well.
4193 */
4194 if (count == 0)
4195 intel_dp->sink_crc.last_count = 0;
4196
4197 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4198 ret = -EIO;
4199 goto stop;
4200 }
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004201
4202 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4203 !memcmp(intel_dp->sink_crc.last_crc, crc,
4204 6 * sizeof(u8)));
4205
4206 } while (--attempts && (count == 0 || old_equal_new));
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004207
4208 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4209 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004210
4211 if (attempts == 0) {
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004212 if (old_equal_new) {
4213 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4214 } else {
4215 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4216 ret = -ETIMEDOUT;
4217 goto stop;
4218 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004219 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004220
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004221stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004222 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004223 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004224}
4225
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004226static bool
4227intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4228{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004229 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4230 DP_DEVICE_SERVICE_IRQ_VECTOR,
4231 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004232}
4233
Dave Airlie0e32b392014-05-02 14:02:48 +10004234static bool
4235intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4236{
4237 int ret;
4238
4239 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4240 DP_SINK_COUNT_ESI,
4241 sink_irq_vector, 14);
4242 if (ret != 14)
4243 return false;
4244
4245 return true;
4246}
4247
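/*
 * DP compliance automated test handlers. The link training, video pattern and
 * PHY pattern tests are stubs for now; only the EDID read test does real work.
 */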
Todd Previtec5d5ab72015-04-15 08:38:38 -07004248static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004249{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004250 uint8_t test_result = DP_TEST_ACK;
4251 return test_result;
4252}
4253
4254static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4255{
4256 uint8_t test_result = DP_TEST_NAK;
4257 return test_result;
4258}
4259
4260static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4261{
4262 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004263 struct intel_connector *intel_connector = intel_dp->attached_connector;
4264 struct drm_connector *connector = &intel_connector->base;
4265
4266 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004267 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004268 intel_dp->aux.i2c_defer_count > 6) {
4269 /* Check EDID read for NACKs, DEFERs and corruption
4270 * (DP CTS 1.2 Core r1.1)
4271 * 4.2.2.4 : Failed EDID read, I2C_NAK
4272 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4273 * 4.2.2.6 : EDID corruption detected
4274 * Use failsafe mode for all cases
4275 */
4276 if (intel_dp->aux.i2c_nack_count > 0 ||
4277 intel_dp->aux.i2c_defer_count > 0)
4278 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4279 intel_dp->aux.i2c_nack_count,
4280 intel_dp->aux.i2c_defer_count);
4281 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4282 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304283 struct edid *block = intel_connector->detect_edid;
4284
4285 /* We have to write the checksum
4286 * of the last block read
4287 */
4288 block += intel_connector->detect_edid->extensions;
4289
Todd Previte559be302015-05-04 07:48:20 -07004290 if (!drm_dp_dpcd_write(&intel_dp->aux,
4291 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304292 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004293 1))
Todd Previte559be302015-05-04 07:48:20 -07004294 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4295
4296 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4297 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4298 }
4299
4300 /* Set test active flag here so userspace doesn't interrupt things */
4301 intel_dp->compliance_test_active = 1;
4302
Todd Previtec5d5ab72015-04-15 08:38:38 -07004303 return test_result;
4304}
4305
4306static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4307{
4308 uint8_t test_result = DP_TEST_NAK;
4309 return test_result;
4310}
4311
4312static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4313{
4314 uint8_t response = DP_TEST_NAK;
4315 uint8_t rxdata = 0;
4316 int status = 0;
4317
Todd Previte559be302015-05-04 07:48:20 -07004318 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004319 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004320 intel_dp->compliance_test_data = 0;
4321
Todd Previtec5d5ab72015-04-15 08:38:38 -07004322 intel_dp->aux.i2c_nack_count = 0;
4323 intel_dp->aux.i2c_defer_count = 0;
4324
4325 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4326 if (status <= 0) {
4327 DRM_DEBUG_KMS("Could not read test request from sink\n");
4328 goto update_status;
4329 }
4330
4331 switch (rxdata) {
4332 case DP_TEST_LINK_TRAINING:
4333 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4334 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4335 response = intel_dp_autotest_link_training(intel_dp);
4336 break;
4337 case DP_TEST_LINK_VIDEO_PATTERN:
4338 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4339 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4340 response = intel_dp_autotest_video_pattern(intel_dp);
4341 break;
4342 case DP_TEST_LINK_EDID_READ:
4343 DRM_DEBUG_KMS("EDID test requested\n");
4344 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4345 response = intel_dp_autotest_edid(intel_dp);
4346 break;
4347 case DP_TEST_LINK_PHY_TEST_PATTERN:
4348 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4349 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4350 response = intel_dp_autotest_phy_pattern(intel_dp);
4351 break;
4352 default:
4353 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4354 break;
4355 }
4356
4357update_status:
4358 status = drm_dp_dpcd_write(&intel_dp->aux,
4359 DP_TEST_RESPONSE,
4360 &response, 1);
4361 if (status <= 0)
4362 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004363}
4364
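/*
 * Service an MST interrupt: read the ESI vector from the sink, retrain the
 * link if channel EQ has been lost, hand any topology messages to the MST
 * manager, and ack the handled events back to the sink.
 */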
Dave Airlie0e32b392014-05-02 14:02:48 +10004365static int
4366intel_dp_check_mst_status(struct intel_dp *intel_dp)
4367{
4368 bool bret;
4369
4370 if (intel_dp->is_mst) {
4371 u8 esi[16] = { 0 };
4372 int ret = 0;
4373 int retry;
4374 bool handled;
4375 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4376go_again:
4377 if (bret == true) {
4378
4379 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004380 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004381 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004382 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4383 intel_dp_start_link_train(intel_dp);
4384 intel_dp_complete_link_train(intel_dp);
4385 intel_dp_stop_link_train(intel_dp);
4386 }
4387
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004388 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004389 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4390
4391 if (handled) {
4392 for (retry = 0; retry < 3; retry++) {
4393 int wret;
4394 wret = drm_dp_dpcd_write(&intel_dp->aux,
4395 DP_SINK_COUNT_ESI+1,
4396 &esi[1], 3);
4397 if (wret == 3) {
4398 break;
4399 }
4400 }
4401
4402 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4403 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004404 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004405 goto go_again;
4406 }
4407 } else
4408 ret = 0;
4409
4410 return ret;
4411 } else {
4412 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4413 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4414 intel_dp->is_mst = false;
4415 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4416 /* send a hotplug event */
4417 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4418 }
4419 }
4420 return -EINVAL;
4421}
4422
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004423/*
4424 * According to DP spec
4425 * 5.1.2:
4426 * 1. Read DPCD
4427 * 2. Configure link according to Receiver Capabilities
4428 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4429 * 4. Check link status on receipt of hot-plug interrupt
4430 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004431static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004432intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004433{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004434 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004435 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004436 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004437 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004438
Dave Airlie5b215bc2014-08-05 10:40:20 +10004439 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4440
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004441 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004442 return;
4443
Imre Deak1a125d82014-08-18 14:42:46 +03004444 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4445 return;
4446
Keith Packard92fd8fd2011-07-25 19:50:10 -07004447 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004448 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004449 return;
4450 }
4451
Keith Packard92fd8fd2011-07-25 19:50:10 -07004452 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004453 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004454 return;
4455 }
4456
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004457 /* Try to read the source of the interrupt */
4458 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4459 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4460 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004461 drm_dp_dpcd_writeb(&intel_dp->aux,
4462 DP_DEVICE_SERVICE_IRQ_VECTOR,
4463 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004464
4465 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004466 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004467 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4468 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4469 }
4470
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004471 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004472 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004473 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004474 intel_dp_start_link_train(intel_dp);
4475 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004476 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004477 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004478}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004479
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004480/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004481static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004482intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004483{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004484 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004485 uint8_t type;
4486
4487 if (!intel_dp_get_dpcd(intel_dp))
4488 return connector_status_disconnected;
4489
4490 /* if there's no downstream port, we're done */
4491 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004492 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004493
4494 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004495 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4496 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004497 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004498
4499 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4500 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004501 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004502
Adam Jackson23235172012-09-20 16:42:45 -04004503 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4504 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004505 }
4506
4507 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004508 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004509 return connector_status_connected;
4510
4511 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004512 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4513 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4514 if (type == DP_DS_PORT_TYPE_VGA ||
4515 type == DP_DS_PORT_TYPE_NON_EDID)
4516 return connector_status_unknown;
4517 } else {
4518 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4519 DP_DWN_STRM_PORT_TYPE_MASK;
4520 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4521 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4522 return connector_status_unknown;
4523 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004524
4525 /* Anything else is out of spec, warn and ignore */
4526 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004527 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004528}
4529
4530static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004531edp_detect(struct intel_dp *intel_dp)
4532{
4533 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4534 enum drm_connector_status status;
4535
4536 status = intel_panel_detect(dev);
4537 if (status == connector_status_unknown)
4538 status = connector_status_connected;
4539
4540 return status;
4541}
4542
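/*
 * The *_digital_port_connected() helpers below report the live hot plug state
 * of a digital port by reading the platform specific status register (SDEISR,
 * PORT_HOTPLUG_STAT or GEN8_DE_PORT_ISR).
 */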
Jani Nikulab93433c2015-08-20 10:47:36 +03004543static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4544 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004545{
Jani Nikulab93433c2015-08-20 10:47:36 +03004546 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004547
Jani Nikula0df53b72015-08-20 10:47:40 +03004548 switch (port->port) {
4549 case PORT_A:
4550 return true;
4551 case PORT_B:
4552 bit = SDE_PORTB_HOTPLUG;
4553 break;
4554 case PORT_C:
4555 bit = SDE_PORTC_HOTPLUG;
4556 break;
4557 case PORT_D:
4558 bit = SDE_PORTD_HOTPLUG;
4559 break;
4560 default:
4561 MISSING_CASE(port->port);
4562 return false;
4563 }
4564
4565 return I915_READ(SDEISR) & bit;
4566}
4567
4568static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4569 struct intel_digital_port *port)
4570{
4571 u32 bit;
4572
4573 switch (port->port) {
4574 case PORT_A:
4575 return true;
4576 case PORT_B:
4577 bit = SDE_PORTB_HOTPLUG_CPT;
4578 break;
4579 case PORT_C:
4580 bit = SDE_PORTC_HOTPLUG_CPT;
4581 break;
4582 case PORT_D:
4583 bit = SDE_PORTD_HOTPLUG_CPT;
4584 break;
4585 default:
4586 MISSING_CASE(port->port);
4587 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004588 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004589
Jani Nikulab93433c2015-08-20 10:47:36 +03004590 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004591}
4592
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004593static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004594 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004595{
Jani Nikula9642c812015-08-20 10:47:41 +03004596 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004597
Jani Nikula9642c812015-08-20 10:47:41 +03004598 switch (port->port) {
4599 case PORT_B:
4600 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4601 break;
4602 case PORT_C:
4603 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4604 break;
4605 case PORT_D:
4606 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4607 break;
4608 default:
4609 MISSING_CASE(port->port);
4610 return false;
4611 }
4612
4613 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4614}
4615
4616static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4617 struct intel_digital_port *port)
4618{
4619 u32 bit;
4620
4621 switch (port->port) {
4622 case PORT_B:
4623 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4624 break;
4625 case PORT_C:
4626 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4627 break;
4628 case PORT_D:
4629 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4630 break;
4631 default:
4632 MISSING_CASE(port->port);
4633 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004634 }
4635
Jani Nikula1d245982015-08-20 10:47:37 +03004636 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004637}
4638
Jani Nikulae464bfd2015-08-20 10:47:42 +03004639static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304640 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004641{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304642 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4643 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004644 u32 bit;
4645
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304646 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4647 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004648 case PORT_A:
4649 bit = BXT_DE_PORT_HP_DDIA;
4650 break;
4651 case PORT_B:
4652 bit = BXT_DE_PORT_HP_DDIB;
4653 break;
4654 case PORT_C:
4655 bit = BXT_DE_PORT_HP_DDIC;
4656 break;
4657 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304658 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004659 return false;
4660 }
4661
4662 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4663}
4664
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004665/*
4666 * intel_digital_port_connected - is the specified port connected?
4667 * @dev_priv: i915 private structure
4668 * @port: the port to test
4669 *
4670 * Return %true if @port is connected, %false otherwise.
4671 */
4672static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4673 struct intel_digital_port *port)
4674{
Jani Nikula0df53b72015-08-20 10:47:40 +03004675 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004676 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004677	else if (HAS_PCH_SPLIT(dev_priv))
4678 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004679 else if (IS_BROXTON(dev_priv))
4680 return bxt_digital_port_connected(dev_priv, port);
Jani Nikula9642c812015-08-20 10:47:41 +03004681 else if (IS_VALLEYVIEW(dev_priv))
4682 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004683 else
4684 return g4x_digital_port_connected(dev_priv, port);
4685}
4686
Dave Airlie2a592be2014-09-01 16:58:12 +10004687static enum drm_connector_status
Jani Nikulab93433c2015-08-20 10:47:36 +03004688ironlake_dp_detect(struct intel_dp *intel_dp)
4689{
4690 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4691 struct drm_i915_private *dev_priv = dev->dev_private;
4692 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4693
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004694 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
Jani Nikulab93433c2015-08-20 10:47:36 +03004695 return connector_status_disconnected;
4696
4697 return intel_dp_detect_dpcd(intel_dp);
4698}
4699
4700static enum drm_connector_status
Dave Airlie2a592be2014-09-01 16:58:12 +10004701g4x_dp_detect(struct intel_dp *intel_dp)
4702{
4703 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Dave Airlie2a592be2014-09-01 16:58:12 +10004705
4706 /* Can't disconnect eDP, but you can close the lid... */
4707 if (is_edp(intel_dp)) {
4708 enum drm_connector_status status;
4709
4710 status = intel_panel_detect(dev);
4711 if (status == connector_status_unknown)
4712 status = connector_status_connected;
4713 return status;
4714 }
4715
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004716 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004717 return connector_status_disconnected;
4718
Keith Packard26d61aa2011-07-25 20:01:09 -07004719 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004720}
4721
Keith Packard8c241fe2011-09-28 16:38:44 -07004722static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004723intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004724{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004725 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004726
Jani Nikula9cd300e2012-10-19 14:51:52 +03004727 /* use cached edid if we have one */
4728 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004729 /* invalid edid */
4730 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004731 return NULL;
4732
Jani Nikula55e9ede2013-10-01 10:38:54 +03004733 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004734 } else
4735 return drm_get_edid(&intel_connector->base,
4736 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004737}
4738
Chris Wilsonbeb60602014-09-02 20:04:00 +01004739static void
4740intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004741{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004742 struct intel_connector *intel_connector = intel_dp->attached_connector;
4743 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004744
Chris Wilsonbeb60602014-09-02 20:04:00 +01004745 edid = intel_dp_get_edid(intel_dp);
4746 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004747
Chris Wilsonbeb60602014-09-02 20:04:00 +01004748 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4749 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4750 else
4751 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4752}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004753
Chris Wilsonbeb60602014-09-02 20:04:00 +01004754static void
4755intel_dp_unset_edid(struct intel_dp *intel_dp)
4756{
4757 struct intel_connector *intel_connector = intel_dp->attached_connector;
4758
4759 kfree(intel_connector->detect_edid);
4760 intel_connector->detect_edid = NULL;
4761
4762 intel_dp->has_audio = false;
4763}
4764
4765static enum intel_display_power_domain
4766intel_dp_power_get(struct intel_dp *dp)
4767{
4768 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4769 enum intel_display_power_domain power_domain;
4770
4771 power_domain = intel_display_port_power_domain(encoder);
4772 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4773
4774 return power_domain;
4775}
4776
4777static void
4778intel_dp_power_put(struct intel_dp *dp,
4779 enum intel_display_power_domain power_domain)
4780{
4781 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4782 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004783}
4784
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004785static enum drm_connector_status
4786intel_dp_detect(struct drm_connector *connector, bool force)
4787{
4788 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4790 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004791 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004792 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004793 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004794 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004795 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004796
Chris Wilson164c8592013-07-20 20:27:08 +01004797 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004798 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004799 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004800
Dave Airlie0e32b392014-05-02 14:02:48 +10004801 if (intel_dp->is_mst) {
4802 /* MST devices are disconnected from a monitor POV */
4803 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4804 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004805 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004806 }
4807
Chris Wilsonbeb60602014-09-02 20:04:00 +01004808 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004809
Chris Wilsond410b562014-09-02 20:03:59 +01004810 /* Can't disconnect eDP, but you can close the lid... */
4811 if (is_edp(intel_dp))
4812 status = edp_detect(intel_dp);
4813 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004814 status = ironlake_dp_detect(intel_dp);
4815 else
4816 status = g4x_dp_detect(intel_dp);
4817 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004818 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004819
Adam Jackson0d198322012-05-14 16:05:47 -04004820 intel_dp_probe_oui(intel_dp);
4821
Dave Airlie0e32b392014-05-02 14:02:48 +10004822 ret = intel_dp_probe_mst(intel_dp);
4823 if (ret) {
4824 /* if we are in MST mode then this connector
 4825		 * won't appear connected or have anything with EDID on it */
4826 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4827 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4828 status = connector_status_disconnected;
4829 goto out;
4830 }
4831
Chris Wilsonbeb60602014-09-02 20:04:00 +01004832 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004833
Paulo Zanonid63885d2012-10-26 19:05:49 -02004834 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4835 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004836 status = connector_status_connected;
4837
Todd Previte09b1eb12015-04-20 15:27:34 -07004838 /* Try to read the source of the interrupt */
4839 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4840 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4841 /* Clear interrupt source */
4842 drm_dp_dpcd_writeb(&intel_dp->aux,
4843 DP_DEVICE_SERVICE_IRQ_VECTOR,
4844 sink_irq_vector);
4845
4846 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4847 intel_dp_handle_test_request(intel_dp);
4848 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4849 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4850 }
4851
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004852out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004853 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004854 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004855}
4856
Chris Wilsonbeb60602014-09-02 20:04:00 +01004857static void
4858intel_dp_force(struct drm_connector *connector)
4859{
4860 struct intel_dp *intel_dp = intel_attached_dp(connector);
4861 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4862 enum intel_display_power_domain power_domain;
4863
4864 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4865 connector->base.id, connector->name);
4866 intel_dp_unset_edid(intel_dp);
4867
4868 if (connector->status != connector_status_connected)
4869 return;
4870
4871 power_domain = intel_dp_power_get(intel_dp);
4872
4873 intel_dp_set_edid(intel_dp);
4874
4875 intel_dp_power_put(intel_dp, power_domain);
4876
4877 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4878 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4879}
4880
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004881static int intel_dp_get_modes(struct drm_connector *connector)
4882{
Jani Nikuladd06f902012-10-19 14:51:50 +03004883 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004884 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004885
Chris Wilsonbeb60602014-09-02 20:04:00 +01004886 edid = intel_connector->detect_edid;
4887 if (edid) {
4888 int ret = intel_connector_update_modes(connector, edid);
4889 if (ret)
4890 return ret;
4891 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004892
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004893 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004894 if (is_edp(intel_attached_dp(connector)) &&
4895 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004896 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004897
4898 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004899 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004900 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004901 drm_mode_probed_add(connector, mode);
4902 return 1;
4903 }
4904 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004905
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004906 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004907}
4908
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004909static bool
4910intel_dp_detect_audio(struct drm_connector *connector)
4911{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004912 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004913 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004914
Chris Wilsonbeb60602014-09-02 20:04:00 +01004915 edid = to_intel_connector(connector)->detect_edid;
4916 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004917 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004918
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004919 return has_audio;
4920}
4921
Chris Wilsonf6849602010-09-19 09:29:33 +01004922static int
4923intel_dp_set_property(struct drm_connector *connector,
4924 struct drm_property *property,
4925 uint64_t val)
4926{
Chris Wilsone953fd72011-02-21 22:23:52 +00004927 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004928 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004929 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4930 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004931 int ret;
4932
Rob Clark662595d2012-10-11 20:36:04 -05004933 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004934 if (ret)
4935 return ret;
4936
Chris Wilson3f43c482011-05-12 22:17:24 +01004937 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004938 int i = val;
4939 bool has_audio;
4940
4941 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004942 return 0;
4943
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004944 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004945
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004946 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004947 has_audio = intel_dp_detect_audio(connector);
4948 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004949 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004950
4951 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004952 return 0;
4953
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004954 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004955 goto done;
4956 }
4957
Chris Wilsone953fd72011-02-21 22:23:52 +00004958 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004959 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004960 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004961
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004962 switch (val) {
4963 case INTEL_BROADCAST_RGB_AUTO:
4964 intel_dp->color_range_auto = true;
4965 break;
4966 case INTEL_BROADCAST_RGB_FULL:
4967 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004968 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004969 break;
4970 case INTEL_BROADCAST_RGB_LIMITED:
4971 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004972 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004973 break;
4974 default:
4975 return -EINVAL;
4976 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004977
4978 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004979 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004980 return 0;
4981
Chris Wilsone953fd72011-02-21 22:23:52 +00004982 goto done;
4983 }
4984
Yuly Novikov53b41832012-10-26 12:04:00 +03004985 if (is_edp(intel_dp) &&
4986 property == connector->dev->mode_config.scaling_mode_property) {
4987 if (val == DRM_MODE_SCALE_NONE) {
4988 DRM_DEBUG_KMS("no scaling not supported\n");
4989 return -EINVAL;
4990 }
4991
4992 if (intel_connector->panel.fitting_mode == val) {
4993 /* the eDP scaling property is not changed */
4994 return 0;
4995 }
4996 intel_connector->panel.fitting_mode = val;
4997
4998 goto done;
4999 }
5000
Chris Wilsonf6849602010-09-19 09:29:33 +01005001 return -EINVAL;
5002
5003done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00005004 if (intel_encoder->base.crtc)
5005 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01005006
5007 return 0;
5008}
5009
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005010static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03005011intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005012{
Jani Nikula1d508702012-10-19 14:51:49 +03005013 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02005014
Chris Wilson10e972d2014-09-04 21:43:45 +01005015 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01005016
Jani Nikula9cd300e2012-10-19 14:51:52 +03005017 if (!IS_ERR_OR_NULL(intel_connector->edid))
5018 kfree(intel_connector->edid);
5019
Paulo Zanoniacd8db102013-06-12 17:27:23 -03005020 /* Can't call is_edp() since the encoder may have been destroyed
5021 * already. */
5022 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03005023 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02005024
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005025 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08005026 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005027}
5028
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005029void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02005030{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02005031 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5032 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02005033
Dave Airlie4f71d0c2014-06-04 16:02:28 +10005034 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10005035 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07005036 if (is_edp(intel_dp)) {
5037 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03005038 /*
 5039	 * vdd might still be enabled due to the delayed vdd off.
5040 * Make sure vdd is actually turned off here.
5041 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005042 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01005043 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005044 pps_unlock(intel_dp);
5045
Clint Taylor01527b32014-07-07 13:01:46 -07005046 if (intel_dp->edp_notifier.notifier_call) {
5047 unregister_reboot_notifier(&intel_dp->edp_notifier);
5048 intel_dp->edp_notifier.notifier_call = NULL;
5049 }
Keith Packardbd943152011-09-18 23:09:52 -07005050 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02005051 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02005052 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02005053}
5054
Imre Deak07f9cd02014-08-18 14:42:45 +03005055static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5056{
5057 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5058
5059 if (!is_edp(intel_dp))
5060 return;
5061
Ville Syrjälä951468f2014-09-04 14:55:31 +03005062 /*
 5063	 * vdd might still be enabled due to the delayed vdd off.
5064 * Make sure vdd is actually turned off here.
5065 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02005066 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005067 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03005068 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005069 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03005070}
5071
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005072static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5073{
5074 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5075 struct drm_device *dev = intel_dig_port->base.base.dev;
5076 struct drm_i915_private *dev_priv = dev->dev_private;
5077 enum intel_display_power_domain power_domain;
5078
5079 lockdep_assert_held(&dev_priv->pps_mutex);
5080
5081 if (!edp_have_panel_vdd(intel_dp))
5082 return;
5083
5084 /*
5085 * The VDD bit needs a power domain reference, so if the bit is
5086 * already enabled when we boot or resume, grab this reference and
5087 * schedule a vdd off, so we don't hold on to the reference
5088 * indefinitely.
5089 */
5090 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5091 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5092 intel_display_power_get(dev_priv, power_domain);
5093
5094 edp_panel_vdd_schedule_off(intel_dp);
5095}
5096
Imre Deak6d93c0c2014-07-31 14:03:36 +03005097static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5098{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005099 struct intel_dp *intel_dp;
5100
5101 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5102 return;
5103
5104 intel_dp = enc_to_intel_dp(encoder);
5105
5106 pps_lock(intel_dp);
5107
5108 /*
5109 * Read out the current power sequencer assignment,
5110 * in case the BIOS did something with it.
5111 */
5112 if (IS_VALLEYVIEW(encoder->dev))
5113 vlv_initial_power_sequencer_setup(intel_dp);
5114
5115 intel_edp_panel_vdd_sanitize(intel_dp);
5116
5117 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03005118}
5119
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005120static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02005121 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005122 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01005123 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005124 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01005125 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08005126 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03005127 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08005128 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02005129 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005130};
5131
5132static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5133 .get_modes = intel_dp_get_modes,
5134 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01005135 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005136};
5137
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005138static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03005139 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02005140 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005141};
5142
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005143enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10005144intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5145{
5146 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03005147 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10005148 struct drm_device *dev = intel_dig_port->base.base.dev;
5149 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03005150 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005151 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03005152
Dave Airlie0e32b392014-05-02 14:02:48 +10005153 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5154 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10005155
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005156 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5157 /*
5158 * vdd off can generate a long pulse on eDP which
5159 * would require vdd on to handle it, and thus we
5160 * would end up in an endless cycle of
5161 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5162 */
5163 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5164 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02005165 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005166 }
5167
Ville Syrjälä26fbb772014-08-11 18:37:37 +03005168 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5169 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10005170 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10005171
Imre Deak1c767b32014-08-18 14:42:42 +03005172 power_domain = intel_display_port_power_domain(intel_encoder);
5173 intel_display_power_get(dev_priv, power_domain);
5174
Dave Airlie0e32b392014-05-02 14:02:48 +10005175 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03005176 /* indicate that we need to restart link training */
5177 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10005178
Jani Nikula7e66bcf2015-08-20 10:47:39 +03005179 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5180 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10005181
5182 if (!intel_dp_get_dpcd(intel_dp)) {
5183 goto mst_fail;
5184 }
5185
5186 intel_dp_probe_oui(intel_dp);
5187
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005188 if (!intel_dp_probe_mst(intel_dp)) {
5189 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5190 intel_dp_check_link_status(intel_dp);
5191 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005192 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03005193 }
Dave Airlie0e32b392014-05-02 14:02:48 +10005194 } else {
5195 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03005196 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10005197 goto mst_fail;
5198 }
5199
5200 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10005201 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10005202 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10005203 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005204 }
5205 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005206
5207 ret = IRQ_HANDLED;
5208
Imre Deak1c767b32014-08-18 14:42:42 +03005209 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10005210mst_fail:
 5211	/* if we were in MST mode, and the device is not there, get out of MST mode */
5212 if (intel_dp->is_mst) {
5213 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5214 intel_dp->is_mst = false;
5215 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5216 }
Imre Deak1c767b32014-08-18 14:42:42 +03005217put_power:
5218 intel_display_power_put(dev_priv, power_domain);
5219
5220 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10005221}
5222
Zhenyu Wange3421a12010-04-08 09:43:27 +08005223/* Return which DP Port should be selected for Transcoder DP control */
5224int
Akshay Joshi0206e352011-08-16 15:34:10 -04005225intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08005226{
5227 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005228 struct intel_encoder *intel_encoder;
5229 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005230
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005231 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5232 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01005233
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005234 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5235 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01005236 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005237 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01005238
Zhenyu Wange3421a12010-04-08 09:43:27 +08005239 return -1;
5240}
5241
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005242/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005243bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005244{
5245 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005246 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005247 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005248 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005249 [PORT_B] = DVO_PORT_DPB,
5250 [PORT_C] = DVO_PORT_DPC,
5251 [PORT_D] = DVO_PORT_DPD,
5252 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005253 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005254
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005255 if (port == PORT_A)
5256 return true;
5257
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005258 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005259 return false;
5260
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005261 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5262 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005263
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005264 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005265 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5266 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005267 return true;
5268 }
5269 return false;
5270}
5271
Dave Airlie0e32b392014-05-02 14:02:48 +10005272void
Chris Wilsonf6849602010-09-19 09:29:33 +01005273intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5274{
Yuly Novikov53b41832012-10-26 12:04:00 +03005275 struct intel_connector *intel_connector = to_intel_connector(connector);
5276
Chris Wilson3f43c482011-05-12 22:17:24 +01005277 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005278 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005279 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005280
5281 if (is_edp(intel_dp)) {
5282 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005283 drm_object_attach_property(
5284 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005285 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005286 DRM_MODE_SCALE_ASPECT);
5287 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005288 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005289}
5290
Imre Deakdada1a92014-01-29 13:25:41 +02005291static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5292{
5293 intel_dp->last_power_cycle = jiffies;
5294 intel_dp->last_power_on = jiffies;
5295 intel_dp->last_backlight_off = jiffies;
5296}
5297
Daniel Vetter67a54562012-10-20 20:57:45 +02005298static void
5299intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005300 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005301{
5302 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005303 struct edp_power_seq cur, vbt, spec,
5304 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305305 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5306 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005307
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005308 lockdep_assert_held(&dev_priv->pps_mutex);
5309
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005310 /* already initialized? */
5311 if (final->t11_t12 != 0)
5312 return;
5313
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305314 if (IS_BROXTON(dev)) {
5315 /*
5316 * TODO: BXT has 2 sets of PPS registers.
 5317		 * The correct register for Broxton needs to be identified
 5318		 * using the VBT; hardcoding for now.
5319 */
5320 pp_ctrl_reg = BXT_PP_CONTROL(0);
5321 pp_on_reg = BXT_PP_ON_DELAYS(0);
5322 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5323 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005324 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005325 pp_on_reg = PCH_PP_ON_DELAYS;
5326 pp_off_reg = PCH_PP_OFF_DELAYS;
5327 pp_div_reg = PCH_PP_DIVISOR;
5328 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005329 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5330
5331 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5332 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5333 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5334 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005335 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005336
5337 /* Workaround: Need to write PP_CONTROL with the unlock key as
5338 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305339 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005340
Jesse Barnes453c5422013-03-28 09:55:41 -07005341 pp_on = I915_READ(pp_on_reg);
5342 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305343 if (!IS_BROXTON(dev)) {
5344 I915_WRITE(pp_ctrl_reg, pp_ctl);
5345 pp_div = I915_READ(pp_div_reg);
5346 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005347
5348 /* Pull timing values out of registers */
5349 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5350 PANEL_POWER_UP_DELAY_SHIFT;
5351
5352 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5353 PANEL_LIGHT_ON_DELAY_SHIFT;
5354
5355 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5356 PANEL_LIGHT_OFF_DELAY_SHIFT;
5357
5358 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5359 PANEL_POWER_DOWN_DELAY_SHIFT;
5360
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305361 if (IS_BROXTON(dev)) {
5362 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5363 BXT_POWER_CYCLE_DELAY_SHIFT;
5364 if (tmp > 0)
5365 cur.t11_t12 = (tmp - 1) * 1000;
5366 else
5367 cur.t11_t12 = 0;
5368 } else {
5369 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005370 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305371 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005372
5373 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5374 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5375
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005376 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005377
5378 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5379 * our hw here, which are all in 100usec. */
5380 spec.t1_t3 = 210 * 10;
5381 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5382 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5383 spec.t10 = 500 * 10;
5384 /* This one is special and actually in units of 100ms, but zero
5385 * based in the hw (so we need to add 100 ms). But the sw vbt
 5386	 * table multiplies it by 1000 to put it in units of 100usec,
5387 * too. */
5388 spec.t11_t12 = (510 + 100) * 10;
5389
5390 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5391 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5392
5393 /* Use the max of the register settings and vbt. If both are
5394 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005395#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005396 spec.field : \
5397 max(cur.field, vbt.field))
5398 assign_final(t1_t3);
5399 assign_final(t8);
5400 assign_final(t9);
5401 assign_final(t10);
5402 assign_final(t11_t12);
5403#undef assign_final
5404
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005405#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005406 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5407 intel_dp->backlight_on_delay = get_delay(t8);
5408 intel_dp->backlight_off_delay = get_delay(t9);
5409 intel_dp->panel_power_down_delay = get_delay(t10);
5410 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5411#undef get_delay
5412
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005413 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5414 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5415 intel_dp->panel_power_cycle_delay);
5416
5417 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5418 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005419}
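/*
 * Reader note (an illustrative sketch, not in the original source): a quick
 * worked example of the unit handling above, assuming a panel whose VBT and
 * register values are all zero so every delay falls back to the spec limits.
 * The limits are stored in the hardware's 100 us units:
 *
 *	assign_final(t1_t3);	-> final->t1_t3 = spec.t1_t3 = 210 * 10 = 2100
 *	get_delay(t1_t3);	-> DIV_ROUND_UP(2100, 10) = 210 ms power-up delay
 *
 * t11_t12 takes the same path but was built as (510 + 100) * 10 = 6100, i.e.
 * the spec's 510 ms plus the 100 ms that the zero-based hardware field adds
 * back, giving a 610 ms panel_power_cycle_delay.
 */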
5420
5421static void
5422intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005423 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005424{
5425 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005426 u32 pp_on, pp_off, pp_div, port_sel = 0;
5427 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305428 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005429 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005430 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005431
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005432 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005433
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305434 if (IS_BROXTON(dev)) {
5435 /*
5436 * TODO: BXT has 2 sets of PPS registers.
 5437		 * The correct register for Broxton needs to be identified
 5438		 * using the VBT; hardcoding for now.
5439 */
5440 pp_ctrl_reg = BXT_PP_CONTROL(0);
5441 pp_on_reg = BXT_PP_ON_DELAYS(0);
5442 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5443
5444 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005445 pp_on_reg = PCH_PP_ON_DELAYS;
5446 pp_off_reg = PCH_PP_OFF_DELAYS;
5447 pp_div_reg = PCH_PP_DIVISOR;
5448 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005449 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5450
5451 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5452 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5453 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005454 }
5455
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005456 /*
5457 * And finally store the new values in the power sequencer. The
5458 * backlight delays are set to 1 because we do manual waits on them. For
5459 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5460 * we'll end up waiting for the backlight off delay twice: once when we
5461 * do the manual sleep, and once when we disable the panel and wait for
5462 * the PP_STATUS bit to become zero.
5463 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005464 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005465 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5466 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005467 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005468 /* Compute the divisor for the pp clock, simply match the Bspec
5469 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305470 if (IS_BROXTON(dev)) {
5471 pp_div = I915_READ(pp_ctrl_reg);
5472 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5473 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5474 << BXT_POWER_CYCLE_DELAY_SHIFT);
5475 } else {
5476 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5477 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5478 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5479 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005480
5481 /* Haswell doesn't have any port selection bits for the panel
5482 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005483 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005484 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005485 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005486 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005487 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005488 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005489 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005490 }
5491
Jesse Barnes453c5422013-03-28 09:55:41 -07005492 pp_on |= port_sel;
5493
5494 I915_WRITE(pp_on_reg, pp_on);
5495 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305496 if (IS_BROXTON(dev))
5497 I915_WRITE(pp_ctrl_reg, pp_div);
5498 else
5499 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005500
Daniel Vetter67a54562012-10-20 20:57:45 +02005501 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005502 I915_READ(pp_on_reg),
5503 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305504 IS_BROXTON(dev) ?
5505 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005506 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005507}
5508
Vandana Kannanb33a2812015-02-13 15:33:03 +05305509/**
5510 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5511 * @dev: DRM device
5512 * @refresh_rate: RR to be programmed
5513 *
5514 * This function gets called when refresh rate (RR) has to be changed from
5515 * one frequency to another. Switches can be between high and low RR
5516 * supported by the panel or to any other RR based on media playback (in
5517 * this case, RR value needs to be passed from user space).
 5518 * The caller of this function needs to hold dev_priv->drrs.mutex.
5519 * The caller of this function needs to take a lock on dev_priv->drrs.
5520 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305521static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305522{
5523 struct drm_i915_private *dev_priv = dev->dev_private;
5524 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305525 struct intel_digital_port *dig_port = NULL;
5526 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005527 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305528 struct intel_crtc *intel_crtc = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305529 u32 reg, val;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305530 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305531
5532 if (refresh_rate <= 0) {
5533 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5534 return;
5535 }
5536
Vandana Kannan96178ee2015-01-10 02:25:56 +05305537 if (intel_dp == NULL) {
5538 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305539 return;
5540 }
5541
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005542 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005543 * FIXME: This needs proper synchronization with psr state for some
5544 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005545 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305546
Vandana Kannan96178ee2015-01-10 02:25:56 +05305547 dig_port = dp_to_dig_port(intel_dp);
5548 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005549 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305550
5551 if (!intel_crtc) {
5552 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5553 return;
5554 }
5555
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005556 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305557
Vandana Kannan96178ee2015-01-10 02:25:56 +05305558 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305559 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5560 return;
5561 }
5562
Vandana Kannan96178ee2015-01-10 02:25:56 +05305563 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5564 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305565 index = DRRS_LOW_RR;
5566
Vandana Kannan96178ee2015-01-10 02:25:56 +05305567 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305568 DRM_DEBUG_KMS(
5569 "DRRS requested for previously set RR...ignoring\n");
5570 return;
5571 }
5572
5573 if (!intel_crtc->active) {
5574 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5575 return;
5576 }
5577
Durgadoss R44395bf2015-02-13 15:33:02 +05305578 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305579 switch (index) {
5580 case DRRS_HIGH_RR:
5581 intel_dp_set_m_n(intel_crtc, M1_N1);
5582 break;
5583 case DRRS_LOW_RR:
5584 intel_dp_set_m_n(intel_crtc, M2_N2);
5585 break;
5586 case DRRS_MAX_RR:
5587 default:
5588 DRM_ERROR("Unsupported refreshrate type\n");
5589 }
5590 } else if (INTEL_INFO(dev)->gen > 6) {
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005591 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305592 val = I915_READ(reg);
Vandana Kannana4c30b12015-02-13 15:33:00 +05305593
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305594 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305595 if (IS_VALLEYVIEW(dev))
5596 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5597 else
5598 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305599 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305600 if (IS_VALLEYVIEW(dev))
5601 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5602 else
5603 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305604 }
5605 I915_WRITE(reg, val);
5606 }
5607
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305608 dev_priv->drrs.refresh_rate_type = index;
5609
5610 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5611}
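/*
 * Illustrative sketch (not part of the driver source): per the kerneldoc
 * above, callers of intel_dp_set_drrs_state() are expected to hold
 * dev_priv->drrs.mutex, mirroring what the delayed downclock work and the
 * invalidate/flush hooks below do:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
 *		intel_dp_set_drrs_state(dev_priv->dev,
 *					intel_dp->attached_connector->panel.
 *					downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 *
 * The mutex protects drrs.refresh_rate_type and drrs.dp and serializes the
 * refresh rate switch against the idleness work.
 */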
5612
Vandana Kannanb33a2812015-02-13 15:33:03 +05305613/**
5614 * intel_edp_drrs_enable - init drrs struct if supported
5615 * @intel_dp: DP struct
5616 *
5617 * Initializes frontbuffer_bits and drrs.dp
5618 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305619void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5620{
5621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5622 struct drm_i915_private *dev_priv = dev->dev_private;
5623 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5624 struct drm_crtc *crtc = dig_port->base.base.crtc;
5625 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5626
5627 if (!intel_crtc->config->has_drrs) {
5628 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5629 return;
5630 }
5631
5632 mutex_lock(&dev_priv->drrs.mutex);
5633 if (WARN_ON(dev_priv->drrs.dp)) {
5634 DRM_ERROR("DRRS already enabled\n");
5635 goto unlock;
5636 }
5637
5638 dev_priv->drrs.busy_frontbuffer_bits = 0;
5639
5640 dev_priv->drrs.dp = intel_dp;
5641
5642unlock:
5643 mutex_unlock(&dev_priv->drrs.mutex);
5644}
5645
Vandana Kannanb33a2812015-02-13 15:33:03 +05305646/**
5647 * intel_edp_drrs_disable - Disable DRRS
5648 * @intel_dp: DP struct
5649 *
5650 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305651void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5652{
5653 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5655 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5656 struct drm_crtc *crtc = dig_port->base.base.crtc;
5657 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5658
5659 if (!intel_crtc->config->has_drrs)
5660 return;
5661
5662 mutex_lock(&dev_priv->drrs.mutex);
5663 if (!dev_priv->drrs.dp) {
5664 mutex_unlock(&dev_priv->drrs.mutex);
5665 return;
5666 }
5667
5668 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5669 intel_dp_set_drrs_state(dev_priv->dev,
5670 intel_dp->attached_connector->panel.
5671 fixed_mode->vrefresh);
5672
5673 dev_priv->drrs.dp = NULL;
5674 mutex_unlock(&dev_priv->drrs.mutex);
5675
5676 cancel_delayed_work_sync(&dev_priv->drrs.work);
5677}
5678
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305679static void intel_edp_drrs_downclock_work(struct work_struct *work)
5680{
5681 struct drm_i915_private *dev_priv =
5682 container_of(work, typeof(*dev_priv), drrs.work.work);
5683 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305684
Vandana Kannan96178ee2015-01-10 02:25:56 +05305685 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305686
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305687 intel_dp = dev_priv->drrs.dp;
5688
5689 if (!intel_dp)
5690 goto unlock;
5691
5692 /*
5693 * The delayed work can race with an invalidate hence we need to
5694 * recheck.
5695 */
5696
5697 if (dev_priv->drrs.busy_frontbuffer_bits)
5698 goto unlock;
5699
5700 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5701 intel_dp_set_drrs_state(dev_priv->dev,
5702 intel_dp->attached_connector->panel.
5703 downclock_mode->vrefresh);
5704
5705unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305706 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305707}
5708
Vandana Kannanb33a2812015-02-13 15:33:03 +05305709/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305710 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305711 * @dev: DRM device
5712 * @frontbuffer_bits: frontbuffer plane tracking bits
5713 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305714 * This function gets called every time rendering on the given planes starts.
 5715 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305716 *
5717 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5718 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305719void intel_edp_drrs_invalidate(struct drm_device *dev,
5720 unsigned frontbuffer_bits)
5721{
5722 struct drm_i915_private *dev_priv = dev->dev_private;
5723 struct drm_crtc *crtc;
5724 enum pipe pipe;
5725
Daniel Vetter9da7d692015-04-09 16:44:15 +02005726 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305727 return;
5728
Daniel Vetter88f933a2015-04-09 16:44:16 +02005729 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305730
Vandana Kannana93fad02015-01-10 02:25:59 +05305731 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005732 if (!dev_priv->drrs.dp) {
5733 mutex_unlock(&dev_priv->drrs.mutex);
5734 return;
5735 }
5736
Vandana Kannana93fad02015-01-10 02:25:59 +05305737 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5738 pipe = to_intel_crtc(crtc)->pipe;
5739
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005740 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5741 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5742
Ramalingam C0ddfd202015-06-15 20:50:05 +05305743 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005744 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305745 intel_dp_set_drrs_state(dev_priv->dev,
5746 dev_priv->drrs.dp->attached_connector->panel.
5747 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305748
Vandana Kannana93fad02015-01-10 02:25:59 +05305749 mutex_unlock(&dev_priv->drrs.mutex);
5750}
5751
Vandana Kannanb33a2812015-02-13 15:33:03 +05305752/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305753 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305754 * @dev: DRM device
5755 * @frontbuffer_bits: frontbuffer plane tracking bits
5756 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305757 * This function gets called every time rendering on the given planes has
 5758 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 5759 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted,
 5760 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305761 *
5762 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5763 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305764void intel_edp_drrs_flush(struct drm_device *dev,
5765 unsigned frontbuffer_bits)
5766{
5767 struct drm_i915_private *dev_priv = dev->dev_private;
5768 struct drm_crtc *crtc;
5769 enum pipe pipe;
5770
Daniel Vetter9da7d692015-04-09 16:44:15 +02005771 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305772 return;
5773
Daniel Vetter88f933a2015-04-09 16:44:16 +02005774 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305775
Vandana Kannana93fad02015-01-10 02:25:59 +05305776 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005777 if (!dev_priv->drrs.dp) {
5778 mutex_unlock(&dev_priv->drrs.mutex);
5779 return;
5780 }
5781
Vandana Kannana93fad02015-01-10 02:25:59 +05305782 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5783 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005784
5785 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305786 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5787
Ramalingam C0ddfd202015-06-15 20:50:05 +05305788 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005789 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305790 intel_dp_set_drrs_state(dev_priv->dev,
5791 dev_priv->drrs.dp->attached_connector->panel.
5792 fixed_mode->vrefresh);
5793
5794 /*
5795 * flush also means no more activity hence schedule downclock, if all
5796 * other fbs are quiescent too
5797 */
5798 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305799 schedule_delayed_work(&dev_priv->drrs.work,
5800 msecs_to_jiffies(1000));
5801 mutex_unlock(&dev_priv->drrs.mutex);
5802}
5803
Vandana Kannanb33a2812015-02-13 15:33:03 +05305804/**
5805 * DOC: Display Refresh Rate Switching (DRRS)
5806 *
5807 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5808 * which enables switching between low and high refresh rates,
5809 * dynamically, based on the usage scenario. This feature is applicable
5810 * for internal panels.
5811 *
5812 * Indication that the panel supports DRRS is given by the panel EDID, which
5813 * would list multiple refresh rates for one resolution.
5814 *
5815 * DRRS is of 2 types - static and seamless.
5816 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5817 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5818 * Seamless DRRS involves changing RR without any visual effect to the user
5819 * and can be used during normal system usage. This is done by programming
5820 * certain registers.
5821 *
5822 * Support for static/seamless DRRS may be indicated in the VBT based on
5823 * inputs from the panel spec.
5824 *
5825 * DRRS saves power by switching to low RR based on usage scenarios.
5826 *
5827 * eDP DRRS:-
5828 * The implementation is based on frontbuffer tracking implementation.
5829 * When there is a disturbance on the screen triggered by user activity or a
5830 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5831 * When there is no movement on screen, after a timeout of 1 second, a switch
5832 * to low RR is made.
5833 * For integration with frontbuffer tracking code,
5834 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5835 *
5836 * DRRS can be further extended to support other internal panels and also
5837 * the scenario of video playback wherein RR is set based on the rate
5838 * requested by userspace.
5839 */
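
/*
 * Illustrative sketch only (not part of the driver): the frontbuffer
 * tracking code described above is expected to drive the two DRRS entry
 * points roughly as follows; the helper name frontbuffer_write_done() is
 * an assumption made purely for illustration.
 *
 *	static void frontbuffer_write_done(struct drm_device *dev,
 *					   unsigned frontbuffer_bits)
 *	{
 *		intel_edp_drrs_flush(dev, frontbuffer_bits);
 *	}
 *
 * A flush (write completed) upclocks and re-arms the 1 second idleness
 * timer; the corresponding "write is about to start" path would call
 * intel_edp_drrs_invalidate(dev, frontbuffer_bits) instead, keeping the
 * panel at the high refresh rate while the screen is busy.
 */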
5840
5841/**
5842 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5843 * @intel_connector: eDP connector
5844 * @fixed_mode: preferred mode of panel
5845 *
5846 * This function is called only once at driver load to initialize the
5847 * basic DRRS state (delayed work and mutex).
5848 *
5849 * Returns:
5850 * Downclock mode if panel supports it, else NULL.
5851 * DRRS support is determined by the presence of downclock mode (apart
5852 * from VBT setting).
5853 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305854static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305855intel_dp_drrs_init(struct intel_connector *intel_connector,
5856 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305857{
5858 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305859 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305860 struct drm_i915_private *dev_priv = dev->dev_private;
5861 struct drm_display_mode *downclock_mode = NULL;
5862
Daniel Vetter9da7d692015-04-09 16:44:15 +02005863 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5864 mutex_init(&dev_priv->drrs.mutex);
5865
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305866 if (INTEL_INFO(dev)->gen <= 6) {
5867 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5868 return NULL;
5869 }
5870
5871 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005872 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305873 return NULL;
5874 }
5875
5876 downclock_mode = intel_find_panel_downclock
5877 (dev, fixed_mode, connector);
5878
5879 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305880 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305881 return NULL;
5882 }
5883
Vandana Kannan96178ee2015-01-10 02:25:56 +05305884 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305885
Vandana Kannan96178ee2015-01-10 02:25:56 +05305886 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005887 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305888 return downclock_mode;
5889}
5890
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005891static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005892 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005893{
5894 struct drm_connector *connector = &intel_connector->base;
5895 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005896 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5897 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005898 struct drm_i915_private *dev_priv = dev->dev_private;
5899 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305900 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005901 bool has_dpcd;
5902 struct drm_display_mode *scan;
5903 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005904 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005905
5906 if (!is_edp(intel_dp))
5907 return true;
5908
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005909 pps_lock(intel_dp);
5910 intel_edp_panel_vdd_sanitize(intel_dp);
5911 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005912
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005913 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005914 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005915
5916 if (has_dpcd) {
5917 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5918 dev_priv->no_aux_handshake =
5919 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5920 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5921 } else {
5922 /* if this fails, presume the device is a ghost */
5923 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005924 return false;
5925 }
5926
5927 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005928 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005929 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005930 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005931
Daniel Vetter060c8772014-03-21 23:22:35 +01005932 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005933 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005934 if (edid) {
5935 if (drm_add_edid_modes(connector, edid)) {
5936 drm_mode_connector_update_edid_property(connector,
5937 edid);
5938 drm_edid_to_eld(connector, edid);
5939 } else {
5940 kfree(edid);
5941 edid = ERR_PTR(-EINVAL);
5942 }
5943 } else {
5944 edid = ERR_PTR(-ENOENT);
5945 }
5946 intel_connector->edid = edid;
5947
5948 /* prefer fixed mode from EDID if available */
5949 list_for_each_entry(scan, &connector->probed_modes, head) {
5950 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5951 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305952 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305953 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005954 break;
5955 }
5956 }
5957
5958 /* fallback to VBT if available for eDP */
5959 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5960 fixed_mode = drm_mode_duplicate(dev,
5961 dev_priv->vbt.lfp_lvds_vbt_mode);
5962 if (fixed_mode)
5963 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5964 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005965 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005966
Clint Taylor01527b32014-07-07 13:01:46 -07005967 if (IS_VALLEYVIEW(dev)) {
5968 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5969 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005970
5971 /*
5972 * Figure out the current pipe for the initial backlight setup.
5973 * If the current pipe isn't valid, try the PPS pipe, and if that
5974 * fails just assume pipe A.
5975 */
5976 if (IS_CHERRYVIEW(dev))
5977 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5978 else
5979 pipe = PORT_TO_PIPE(intel_dp->DP);
5980
5981 if (pipe != PIPE_A && pipe != PIPE_B)
5982 pipe = intel_dp->pps_pipe;
5983
5984 if (pipe != PIPE_A && pipe != PIPE_B)
5985 pipe = PIPE_A;
5986
5987 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5988 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005989 }
5990
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305991 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula73580fb72014-08-12 17:11:41 +03005992 intel_connector->panel.backlight_power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005993 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005994
5995 return true;
5996}
5997
Paulo Zanoni16c25532013-06-12 17:27:25 -03005998bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005999intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6000 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006001{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006002 struct drm_connector *connector = &intel_connector->base;
6003 struct intel_dp *intel_dp = &intel_dig_port->dp;
6004 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6005 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006006 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02006007 enum port port = intel_dig_port->port;
Jani Nikula0b998362014-03-14 16:51:17 +02006008 int type;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006009
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03006010 intel_dp->pps_pipe = INVALID_PIPE;
6011
Damien Lespiauec5b01d2014-01-21 13:35:39 +00006012 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00006013 if (INTEL_INFO(dev)->gen >= 9)
6014 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6015 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00006016 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6017 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6018 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6019 else if (HAS_PCH_SPLIT(dev))
6020 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6021 else
6022 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6023
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00006024 if (INTEL_INFO(dev)->gen >= 9)
6025 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6026 else
6027 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00006028
Daniel Vetter07679352012-09-06 22:15:42 +02006029 /* Preserve the current hw state. */
6030 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03006031 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00006032
Ville Syrjälä3b32a352013-11-01 18:22:41 +02006033 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05306034 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02006035 else
6036 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04006037
Imre Deakf7d24902013-05-08 13:14:05 +03006038 /*
6039 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6040 * for DP the encoder type can be set by the caller to
6041 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6042 */
6043 if (type == DRM_MODE_CONNECTOR_eDP)
6044 intel_encoder->type = INTEL_OUTPUT_EDP;
6045
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03006046 /* eDP only on port B and/or C on vlv/chv */
6047 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6048 port != PORT_B && port != PORT_C))
6049 return false;
6050
Imre Deake7281ea2013-05-08 13:14:08 +03006051 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6052 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6053 port_name(port));
6054
Adam Jacksonb3295302010-07-16 14:46:28 -04006055 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006056 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6057
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006058 connector->interlace_allowed = true;
6059 connector->doublescan_allowed = 0;
Ma Lingf8aed702009-08-24 13:50:24 +08006060
Daniel Vetter66a92782012-07-12 20:08:18 +02006061 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
Daniel Vetter4be73782014-01-17 14:39:48 +01006062 edp_panel_vdd_work);
Zhenyu Wang6251ec02010-01-12 05:38:32 +08006063
Chris Wilsondf0e9242010-09-09 16:20:55 +01006064 intel_connector_attach_encoder(intel_connector, intel_encoder);
Thomas Wood34ea3d32014-05-29 16:57:41 +01006065 drm_connector_register(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006066
Paulo Zanoniaffa9352012-11-23 15:30:39 -02006067 if (HAS_DDI(dev))
Paulo Zanonibcbc8892012-10-26 19:05:51 -02006068 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6069 else
6070 intel_connector->get_hw_state = intel_connector_get_hw_state;
Imre Deak80f65de2014-02-11 17:12:49 +02006071 intel_connector->unregister = intel_dp_connector_unregister;
Paulo Zanonibcbc8892012-10-26 19:05:51 -02006072
Jani Nikula0b998362014-03-14 16:51:17 +02006073 /* Set up the hotplug pin. */
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006074 switch (port) {
6075 case PORT_A:
Egbert Eich1d843f92013-02-25 12:06:49 -05006076 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006077 break;
6078 case PORT_B:
Egbert Eich1d843f92013-02-25 12:06:49 -05006079 intel_encoder->hpd_pin = HPD_PORT_B;
Sonika Jindalcf1d5882015-08-10 10:35:36 +05306080 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6081 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006082 break;
6083 case PORT_C:
Egbert Eich1d843f92013-02-25 12:06:49 -05006084 intel_encoder->hpd_pin = HPD_PORT_C;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006085 break;
6086 case PORT_D:
Egbert Eich1d843f92013-02-25 12:06:49 -05006087 intel_encoder->hpd_pin = HPD_PORT_D;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006088 break;
Xiong Zhang26951ca2015-08-17 15:55:50 +08006089 case PORT_E:
6090 intel_encoder->hpd_pin = HPD_PORT_E;
6091 break;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03006092 default:
Damien Lespiauad1c0b12013-03-07 15:30:28 +00006093 BUG();
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08006094 }
6095
Imre Deakdada1a92014-01-29 13:25:41 +02006096 if (is_edp(intel_dp)) {
Ville Syrjälä773538e82014-09-04 14:54:56 +03006097 pps_lock(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02006098 intel_dp_init_panel_power_timestamps(intel_dp);
6099 if (IS_VALLEYVIEW(dev))
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03006100 vlv_initial_power_sequencer_setup(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02006101 else
Ville Syrjälä36b5f422014-10-16 21:27:30 +03006102 intel_dp_init_panel_power_sequencer(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03006103 pps_unlock(intel_dp);
Imre Deakdada1a92014-01-29 13:25:41 +02006104 }
Paulo Zanoni0095e6d2013-12-19 14:29:39 -02006105
Jani Nikula9d1a1032014-03-14 16:51:15 +02006106 intel_dp_aux_init(intel_dp, intel_connector);
Dave Airliec1f05262012-08-30 11:06:18 +10006107
Dave Airlie0e32b392014-05-02 14:02:48 +10006108 /* init MST on ports that can support it */
Jani Nikula0c9b3712015-05-18 17:10:01 +03006109 if (HAS_DP_MST(dev) &&
6110 (port == PORT_B || port == PORT_C || port == PORT_D))
6111 intel_dp_mst_encoder_init(intel_dig_port,
6112 intel_connector->base.base.id);
Dave Airlie0e32b392014-05-02 14:02:48 +10006113
Ville Syrjälä36b5f422014-10-16 21:27:30 +03006114 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Dave Airlie4f71d0c2014-06-04 16:02:28 +10006115 drm_dp_aux_unregister(&intel_dp->aux);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03006116 if (is_edp(intel_dp)) {
6117 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03006118 /*
6119 * vdd might still be enabled due to the delayed vdd off.
6120 * Make sure vdd is actually turned off here.
6121 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03006122 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01006123 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03006124 pps_unlock(intel_dp);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03006125 }
Thomas Wood34ea3d32014-05-29 16:57:41 +01006126 drm_connector_unregister(connector);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03006127 drm_connector_cleanup(connector);
Paulo Zanoni16c25532013-06-12 17:27:25 -03006128 return false;
Paulo Zanonib2f246a2013-06-12 17:27:26 -03006129 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08006130
Chris Wilsonf6849602010-09-19 09:29:33 +01006131 intel_dp_add_properties(intel_dp, connector);
6132
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006133 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6134 * 0xd. Failure to do so will result in spurious interrupts being
6135 * generated on the port when a cable is not attached.
6136 */
6137 if (IS_G4X(dev) && !IS_GM45(dev)) {
6138 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6139 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6140 }
Paulo Zanoni16c25532013-06-12 17:27:25 -03006141
Jani Nikulaaa7471d2015-04-01 11:15:21 +03006142 i915_debugfs_connector_add(connector);
6143
Paulo Zanoni16c25532013-06-12 17:27:25 -03006144 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07006145}
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006146
6147void
6148intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6149{
Dave Airlie13cf5502014-06-18 11:29:35 +10006150 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006151 struct intel_digital_port *intel_dig_port;
6152 struct intel_encoder *intel_encoder;
6153 struct drm_encoder *encoder;
6154 struct intel_connector *intel_connector;
6155
Daniel Vetterb14c5672013-09-19 12:18:32 +02006156 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006157 if (!intel_dig_port)
6158 return;
6159
Ander Conselvan de Oliveira08d9bc92015-04-10 10:59:10 +03006160 intel_connector = intel_connector_alloc();
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006161 if (!intel_connector) {
6162 kfree(intel_dig_port);
6163 return;
6164 }
6165
6166 intel_encoder = &intel_dig_port->base;
6167 encoder = &intel_encoder->base;
6168
6169 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6170 DRM_MODE_ENCODER_TMDS);
6171
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01006172 intel_encoder->compute_config = intel_dp_compute_config;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006173 intel_encoder->disable = intel_disable_dp;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006174 intel_encoder->get_hw_state = intel_dp_get_hw_state;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07006175 intel_encoder->get_config = intel_dp_get_config;
Imre Deak07f9cd02014-08-18 14:42:45 +03006176 intel_encoder->suspend = intel_dp_encoder_suspend;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006177 if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä9197c882014-04-09 13:29:05 +03006178 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006179 intel_encoder->pre_enable = chv_pre_enable_dp;
6180 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä580d3812014-04-09 13:29:00 +03006181 intel_encoder->post_disable = chv_post_disable_dp;
Ville Syrjäläd6db9952015-07-08 23:45:49 +03006182 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03006183 } else if (IS_VALLEYVIEW(dev)) {
Jani Nikulaecff4f32013-09-06 07:38:29 +03006184 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006185 intel_encoder->pre_enable = vlv_pre_enable_dp;
6186 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä49277c32014-03-31 18:21:26 +03006187 intel_encoder->post_disable = vlv_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006188 } else {
Jani Nikulaecff4f32013-09-06 07:38:29 +03006189 intel_encoder->pre_enable = g4x_pre_enable_dp;
6190 intel_encoder->enable = g4x_enable_dp;
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03006191 if (INTEL_INFO(dev)->gen >= 5)
6192 intel_encoder->post_disable = ilk_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03006193 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006194
Paulo Zanoni174edf12012-10-26 19:05:50 -02006195 intel_dig_port->port = port;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006196 intel_dig_port->dp.output_reg = output_reg;
6197
Paulo Zanoni00c09d72012-10-26 19:05:52 -02006198 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Ville Syrjälä882ec382014-04-28 14:07:43 +03006199 if (IS_CHERRYVIEW(dev)) {
6200 if (port == PORT_D)
6201 intel_encoder->crtc_mask = 1 << 2;
6202 else
6203 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6204 } else {
6205 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6206 }
Ville Syrjäläbc079e82014-03-03 16:15:28 +02006207 intel_encoder->cloneable = 0;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006208
Dave Airlie13cf5502014-06-18 11:29:35 +10006209 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
Jani Nikula5fcece82015-05-27 15:03:42 +03006210 dev_priv->hotplug.irq_port[port] = intel_dig_port;
Dave Airlie13cf5502014-06-18 11:29:35 +10006211
Paulo Zanoni15b1d172013-06-12 17:27:27 -03006212 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6213 drm_encoder_cleanup(encoder);
6214 kfree(intel_dig_port);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03006215 kfree(intel_connector);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03006216 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02006217}
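
/*
 * Illustrative sketch only (an assumption, not code from this file): the
 * platform output setup path is expected to probe a port and register it
 * through intel_dp_init() roughly like:
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 *
 * intel_dp_init() then allocates the digital port and connector, wires up
 * the encoder vfuncs and hands off to intel_dp_init_connector() above.
 */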
Dave Airlie0e32b392014-05-02 14:02:48 +10006218
6219void intel_dp_mst_suspend(struct drm_device *dev)
6220{
6221 struct drm_i915_private *dev_priv = dev->dev_private;
6222 int i;
6223
6224 /* disable MST */
6225 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006226 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006227 if (!intel_dig_port)
6228 continue;
6229
6230 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6231 if (!intel_dig_port->dp.can_mst)
6232 continue;
6233 if (intel_dig_port->dp.is_mst)
6234 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6235 }
6236 }
6237}
6238
6239void intel_dp_mst_resume(struct drm_device *dev)
6240{
6241 struct drm_i915_private *dev_priv = dev->dev_private;
6242 int i;
6243
6244 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006245 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006246 if (!intel_dig_port)
6247 continue;
6248 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6249 int ret;
6250
6251 if (!intel_dig_port->dp.can_mst)
6252 continue;
6253
6254 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6255 if (ret != 0) {
6256 intel_dp_check_mst_status(&intel_dig_port->dp);
6257 }
6258 }
6259 }
6260}
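
/*
 * Illustrative sketch only (an assumption about the callers): the
 * driver's system sleep handlers are expected to bracket a suspend/resume
 * cycle with the two helpers above, roughly:
 *
 *	intel_dp_mst_suspend(dev);	(before the display goes down)
 *	...
 *	intel_dp_mst_resume(dev);	(once the links can be retrained)
 *
 * so that MST topology management is quiesced while the ports are powered
 * off and the topology is re-probed on the way back up.
 */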