/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming fractional division for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,	/* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,	/* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,	/* m2_int = 27, m2_fraction = 0 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
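
/*
 * Example decoding of the fixed-point m2 values above: for the 162000 entry
 * m2 = 0x819999a, so m2_int = 0x819999a >> 22 = 32 and m2_fraction =
 * 0x819999a & 0x3fffff = 1677722, giving an effective divider of
 * 32 + 1677722 / (1 << 22), i.e. roughly 32.4.
 */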

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
                                  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);

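/*
 * Mask of the (up to 4) lanes that are *not* used for a given lane count;
 * e.g. a 2-lane configuration gives ~((1 << 2) - 1) & 0xf = 0xc, i.e.
 * lanes 2 and 3 unused.
 */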
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
        return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
        case DP_LINK_BW_5_4:
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
                     max_link_bw);
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
        return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        u8 source_max, sink_max;

        source_max = 4;
        if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
            (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
                source_max = 2;

        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

        return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}
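
/*
 * Sanity check of the example above: 1680x1050R has a 119000 kHz pixel
 * clock, so at 18bpp intel_dp_link_required(119000, 18) =
 * (119000 * 18 + 9) / 10 = 214200 decakilobits, which fits within the
 * 216000 that intel_dp_max_data_rate(270000, 1) reports for one 2.7GHz lane.
 */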

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        int i;
        uint32_t v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((uint32_t) src[i]) << ((3-i) * 8);
        return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;
        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3-i) * 8);
}
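
/*
 * Both helpers treat the AUX data register as big endian: packing the bytes
 * { 0x12, 0x34 } gives 0x12340000, and unpacking 0x12340000 into a two byte
 * buffer returns { 0x12, 0x34 } again.
 */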

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                               struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        /*
         * See vlv_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_get(dev_priv, power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum intel_display_power_domain power_domain;

        mutex_unlock(&dev_priv->pps_mutex);

        power_domain = intel_display_port_power_domain(encoder);
        intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        uint32_t DP;

        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
                 "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->port)))
                return;

        DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->port));

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev))
                DP |= DP_PIPE_SELECT_CHV(pipe);
        else if (pipe == PIPE_B)
                DP |= DP_PIPEB_SELECT;

        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even the VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!is_edp(intel_dp));

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        /*
         * We don't have a power sequencer currently.
         * Pick one that's not used by other ports.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                struct intel_dp *tmp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                tmp = enc_to_intel_dp(&encoder->base);

                if (tmp->pps_pipe != INVALID_PIPE)
                        pipes &= ~(1 << tmp->pps_pipe);
        }

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipes == 0))
                pipe = PIPE_A;
        else
                pipe = ffs(pipes) - 1;

        vlv_steal_power_sequencer(dev, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(dev, intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so they
         * should always be used.
         */

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                intel_dp->pps_pipe = INVALID_PIPE;
        }
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_CONTROL(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_CONTROL;
        else
                return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_BROXTON(dev))
                return BXT_PP_STATUS(0);
        else if (HAS_PCH_SPLIT(dev))
                return PCH_PP_STATUS;
        else
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                u32 pp_ctrl_reg, pp_div_reg;
                u32 pp_div;

                pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
                pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
                pp_div = I915_READ(pp_div_reg);
                pp_div &= PP_REFERENCE_DIVIDER_MASK;

                /* 0x1F write to PP_DIV_REG sets max cycle delay */
                I915_WRITE(pp_div_reg, pp_div | 0x1F);
                I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
                msleep(intel_dp->panel_power_cycle_delay);
        }

        pps_unlock(intel_dp);

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (IS_VALLEYVIEW(dev) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
                              I915_READ(_pp_ctrl_reg(intel_dp)));
        }
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;

        /*
         * The clock divider is based off the hrawclk, and we want the AUX
         * clock to run at 2MHz. So take the hrawclk value, divide by 2, and
         * use that.
         */
        return index ? 0 : intel_hrawclk(dev) / 2;
}
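
/*
 * A rough sanity check, assuming intel_hrawclk() reports the raw clock in
 * MHz: a 200 MHz hrawclk gives a divider of 100, i.e. 200 MHz / 100 = 2 MHz.
 */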

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (index)
                return 0;

        if (intel_dig_port->port == PORT_A) {
                return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

        } else {
                return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (intel_dig_port->port == PORT_A) {
                if (index)
                        return 0;
                return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        } else {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        }
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
                                      bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        uint32_t precharge, timeout;

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                     bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t unused)
{
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                const uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t ch_data = ch_ctl + 4;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
        bool has_aux_irq = HAS_AUX_IRQ(dev);
        bool vdd;

        pps_lock(intel_dp);

        /*
         * We will be called with VDD already enabled for dpcd/edid/oui reads.
         * In such cases we want to leave VDD enabled and it's up to upper layers
         * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
         * ourselves.
         */
        vdd = edp_panel_vdd_on(intel_dp);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);

        intel_aux_display_runtime_get(dev_priv);

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                static u32 last_status = -1;
                const u32 status = I915_READ(ch_ctl);

                if (status != last_status) {
                        WARN(1, "dp_aux_ch not started status 0x%08x\n",
                             status);
                        last_status = status;
                }

                ret = -EBUSY;
                goto out;
        }

        /* Only 5 data registers! */
        if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
                ret = -E2BIG;
                goto out;
        }

        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
                                                          has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);

                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                        for (i = 0; i < send_bytes; i += 4)
                                I915_WRITE(ch_data + i,
                                           intel_dp_pack_aux(send + i,
                                                             send_bytes - i));

                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);

                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
                                   status |
                                   DP_AUX_CH_CTL_DONE |
                                   DP_AUX_CH_CTL_TIME_OUT_ERROR |
                                   DP_AUX_CH_CTL_RECEIVE_ERROR);

                        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
                                continue;

                        /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
                         * 400us delay required for errors and timeouts
                         * Timeout errors from the HW already meet this
                         * requirement so skip to next iteration
                         */
                        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                                usleep_range(400, 500);
                                continue;
                        }
                        if (status & DP_AUX_CH_CTL_DONE)
                                goto done;
                }
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                intel_dp_unpack_aux(I915_READ(ch_data + i),
                                    recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
        intel_aux_display_runtime_put(dev_priv);

        if (vdd)
                edp_panel_vdd_off(intel_dp, false);

        pps_unlock(intel_dp);

        return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
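
/*
 * A full AUX request header is 4 bytes: 4 bits of request type plus a 20-bit
 * address packed into the first three bytes (the "bare address"), followed by
 * one length byte holding size - 1, as assembled into txbuf[0..3] below.
 * Address-only transactions send just the first three bytes.
 */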
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
        struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
        uint8_t txbuf[20], rxbuf[20];
        size_t txsize, rxsize;
        int ret;

        txbuf[0] = (msg->request << 4) |
                ((msg->address >> 16) & 0xf);
        txbuf[1] = (msg->address >> 8) & 0xff;
        txbuf[2] = msg->address & 0xff;
        txbuf[3] = msg->size - 1;

        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
        case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */

                if (WARN_ON(txsize > 20))
                        return -E2BIG;

                memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;

                        if (ret > 1) {
                                /* Number of bytes written in a short write. */
                                ret = clamp_t(int, rxbuf[1], 0, msg->size);
                        } else {
                                /* Return payload size. */
                                ret = msg->size;
                        }
                }
                break;

        case DP_AUX_NATIVE_READ:
        case DP_AUX_I2C_READ:
                txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
                rxsize = msg->size + 1;

                if (WARN_ON(rxsize > 20))
                        return -E2BIG;

                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
                        msg->reply = rxbuf[0] >> 4;
                        /*
                         * Assume happy day, and copy the data. The caller is
                         * expected to check msg->reply before touching it.
                         *
                         * Return payload size.
                         */
                        ret--;
                        memcpy(msg->buffer, rxbuf + 1, ret);
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
        struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
        const char *name = NULL;
        uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
        int ret;

        /* On SKL we don't have Aux for port E so we rely on VBT to set
         * a proper alternate aux channel.
         */
        if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
                switch (info->alternate_aux_channel) {
                case DP_AUX_B:
                        porte_aux_ctl_reg = DPB_AUX_CH_CTL;
                        break;
                case DP_AUX_C:
                        porte_aux_ctl_reg = DPC_AUX_CH_CTL;
                        break;
                case DP_AUX_D:
                        porte_aux_ctl_reg = DPD_AUX_CH_CTL;
                        break;
                case DP_AUX_A:
                default:
                        porte_aux_ctl_reg = DPA_AUX_CH_CTL;
                }
        }

        switch (port) {
        case PORT_A:
                intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
                name = "DPDDC-A";
                break;
        case PORT_B:
                intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
                name = "DPDDC-B";
                break;
        case PORT_C:
                intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
                name = "DPDDC-C";
                break;
        case PORT_D:
                intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
                name = "DPDDC-D";
                break;
        case PORT_E:
                intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
                name = "DPDDC-E";
                break;
        default:
                BUG();
        }

        /*
         * The AUX_CTL register is usually DP_CTL + 0x10.
         *
         * On Haswell and Broadwell though:
         *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
         *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
         *
         * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
         */
        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
                intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

        intel_dp->aux.name = name;
        intel_dp->aux.dev = dev->dev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;

        DRM_DEBUG_KMS("registering %s bus for %s\n", name,
                      connector->base.kdev->kobj.name);

        ret = drm_dp_aux_register(&intel_dp->aux);
        if (ret < 0) {
                DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
                          name, ret);
                return;
        }

        ret = sysfs_create_link(&connector->base.kdev->kobj,
                                &intel_dp->aux.ddc.dev.kobj,
                                intel_dp->aux.ddc.dev.kobj.name);
        if (ret < 0) {
                DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
                drm_dp_aux_unregister(&intel_dp->aux);
        }
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

        if (!intel_connector->mst_port)
                sysfs_remove_link(&intel_connector->base.kdev->kobj,
                                  intel_dp->aux.ddc.dev.kobj.name);
        intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
        u32 ctrl1;

        memset(&pipe_config->dpll_hw_state, 0,
               sizeof(pipe_config->dpll_hw_state));

        pipe_config->ddi_pll_sel = SKL_DPLL0;
        pipe_config->dpll_hw_state.cfgcr1 = 0;
        pipe_config->dpll_hw_state.cfgcr2 = 0;

        ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
        switch (pipe_config->port_clock / 2) {
        case 81000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
                                              SKL_DPLL0);
                break;
        case 135000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
                                              SKL_DPLL0);
                break;
        case 270000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
                                              SKL_DPLL0);
                break;
        case 162000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
                                              SKL_DPLL0);
                break;
        /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
           results in CDCLK change. Need to handle the change of CDCLK by
           disabling pipes and re-enabling them */
        case 108000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
                                              SKL_DPLL0);
                break;
        case 216000:
                ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
                                              SKL_DPLL0);
                break;

        }
        pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
        memset(&pipe_config->dpll_hw_state, 0,
               sizeof(pipe_config->dpll_hw_state));

        switch (pipe_config->port_clock / 2) {
        case 81000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
                break;
        case 135000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
                break;
        case 270000:
                pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
                break;
        }
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
        if (intel_dp->num_sink_rates) {
                *sink_rates = intel_dp->sink_rates;
                return intel_dp->num_sink_rates;
        }

        *sink_rates = default_rates;

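        /*
         * Without a sink rate table, the count follows from the DPCD max link
         * rate encoding: DP_LINK_BW_1_62 (0x06), DP_LINK_BW_2_7 (0x0a) and
         * DP_LINK_BW_5_4 (0x14) shifted right by 3 give 0, 1 and 2, so the
         * "+ 1" below selects one, two or all three entries of default_rates[].
         */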
        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;

        /* WaDisableHBR2:skl */
        if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
                return false;

        if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
            (INTEL_INFO(dev)->gen >= 9))
                return true;
        else
                return false;
}

Sonika Jindala8f3ef62015-03-05 10:02:30 +05301208static int
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001209intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301210{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1212 struct drm_device *dev = dig_port->base.base.dev;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301213 int size;
1214
Sonika Jindal64987fc2015-05-26 17:50:13 +05301215 if (IS_BROXTON(dev)) {
1216 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301217 size = ARRAY_SIZE(bxt_rates);
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001218 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301219 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301220 size = ARRAY_SIZE(skl_rates);
1221 } else {
1222 *source_rates = default_rates;
1223 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301224 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001225
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301226	/* This depends on the fact that 5.4 is the last value in the array */
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001227 if (!intel_dp_source_supports_hbr2(intel_dp))
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301228 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001229
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301230 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301231}
1232
Daniel Vetter0e503382014-07-04 11:26:04 -03001233static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001234intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001235 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001236{
1237 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001238 const struct dp_link_dpll *divisor = NULL;
1239 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001240
1241 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001242 divisor = gen4_dpll;
1243 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001244 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001245 divisor = pch_dpll;
1246 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001247 } else if (IS_CHERRYVIEW(dev)) {
1248 divisor = chv_dpll;
1249 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001250 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001251 divisor = vlv_dpll;
1252 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001253 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001254
1255 if (divisor && count) {
1256 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001257 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001258 pipe_config->dpll = divisor[i].dpll;
1259 pipe_config->clock_set = true;
1260 break;
1261 }
1262 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001263 }
1264}
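/*
 * Added note: the divisor tables consulted above carry pre-computed DPLL
 * settings for the two classic link symbol clocks (162MHz and 270MHz);
 * setting clock_set appears to tell the CRTC code to take these values
 * as-is instead of running its own clock computation.
 */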
1265
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001266static int intersect_rates(const int *source_rates, int source_len,
1267 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001268 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301269{
1270 int i = 0, j = 0, k = 0;
1271
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301272 while (i < source_len && j < sink_len) {
1273 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001274 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1275 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001276 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301277 ++k;
1278 ++i;
1279 ++j;
1280 } else if (source_rates[i] < sink_rates[j]) {
1281 ++i;
1282 } else {
1283 ++j;
1284 }
1285 }
1286 return k;
1287}
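/*
 * Added sketch of the intersection above: both inputs are assumed to be
 * sorted ascending, so one merge-style pass with two cursors suffices.
 * For example, source {162000, 270000, 540000} intersected with sink
 * {162000, 270000} leaves common_rates = {162000, 270000} and returns 2.
 */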
1288
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001289static int intel_dp_common_rates(struct intel_dp *intel_dp,
1290 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001291{
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001292 const int *source_rates, *sink_rates;
1293 int source_len, sink_len;
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001296 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001297
1298 return intersect_rates(source_rates, source_len,
1299 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001300 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001301}
1302
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001303static void snprintf_int_array(char *str, size_t len,
1304 const int *array, int nelem)
1305{
1306 int i;
1307
1308 str[0] = '\0';
1309
1310 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001311 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001312 if (r >= len)
1313 return;
1314 str += r;
1315 len -= r;
1316 }
1317}
1318
1319static void intel_dp_print_rates(struct intel_dp *intel_dp)
1320{
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001321 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001322 int source_len, sink_len, common_len;
1323 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001324 char str[128]; /* FIXME: too big for stack? */
1325
1326 if ((drm_debug & DRM_UT_KMS) == 0)
1327 return;
1328
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001329 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001330 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1331 DRM_DEBUG_KMS("source rates: %s\n", str);
1332
1333 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1334 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1335 DRM_DEBUG_KMS("sink rates: %s\n", str);
1336
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001337 common_len = intel_dp_common_rates(intel_dp, common_rates);
1338 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1339 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001340}
1341
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001342static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301343{
1344 int i = 0;
1345
1346 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1347 if (find == rates[i])
1348 break;
1349
1350 return i;
1351}
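/*
 * Added note: besides mapping a rate to its index, callers below invoke
 * rate_to_index(0, rates) on a zero-initialised array to count how many
 * entries were filled in, so rates[rate_to_index(0, rates) - 1] is the
 * highest (last) common rate.
 */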
1352
Ville Syrjälä50fec212015-03-12 17:10:34 +02001353int
1354intel_dp_max_link_rate(struct intel_dp *intel_dp)
1355{
1356 int rates[DP_MAX_SUPPORTED_RATES] = {};
1357 int len;
1358
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001359 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001360 if (WARN_ON(len <= 0))
1361 return 162000;
1362
1363 return rates[rate_to_index(0, rates) - 1];
1364}
1365
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001366int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1367{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001368 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001369}
1370
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03001371void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1372 uint8_t *link_bw, uint8_t *rate_select)
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001373{
1374 if (intel_dp->num_sink_rates) {
1375 *link_bw = 0;
1376 *rate_select =
1377 intel_dp_rate_select(intel_dp, port_clock);
1378 } else {
1379 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1380 *rate_select = 0;
1381 }
1382}
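/*
 * Added note: this reflects the two DPCD programming models. Sinks that
 * publish a rate table (num_sink_rates != 0, eDP 1.4 style) are handed an
 * index through rate_select with link_bw left at 0, while older sinks get
 * the classic link-bw code (e.g. 0x0a for 2.7GHz) and rate_select = 0.
 */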
1383
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001384bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001385intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001386 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001387{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001388 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001389 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001390 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001391 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001392 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001393 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001394 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001395 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001396 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001397 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001398 /* Conveniently, the link BW constants become indices with a shift...*/
Jani Nikula56071a22014-05-06 14:56:52 +03001399 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301400 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001401 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001402 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001403 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1404 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001405 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301406
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001407 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301408
1409 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001410 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301411
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001412 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001413
Imre Deakbc7d38a2013-05-16 14:40:36 +03001414 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001415 pipe_config->has_pch_encoder = true;
1416
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001417 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001418 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001419 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001420
Jani Nikuladd06f902012-10-19 14:51:50 +03001421 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1422 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1423 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001424
1425 if (INTEL_INFO(dev)->gen >= 9) {
1426 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001427 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001428 if (ret)
1429 return ret;
1430 }
1431
Matt Roperb56676272015-11-04 09:05:27 -08001432 if (HAS_GMCH_DISPLAY(dev))
Jesse Barnes2dd24552013-04-25 12:55:01 -07001433 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1434 intel_connector->panel.fitting_mode);
1435 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001436 intel_pch_panel_fitting(intel_crtc, pipe_config,
1437 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001438 }
1439
Daniel Vettercb1793c2012-06-04 18:39:21 +02001440 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001441 return false;
1442
Daniel Vetter083f9562012-04-20 20:23:49 +02001443 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301444 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001445 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001446 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001447
Daniel Vetter36008362013-03-27 00:44:59 +01001448 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1449 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001450 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001451 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301452
1453		/* Get bpp from vbt only for panels that don't have bpp in edid */
1454 if (intel_connector->base.display_info.bpc == 0 &&
1455 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001456 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1457 dev_priv->vbt.edp_bpp);
1458 bpp = dev_priv->vbt.edp_bpp;
1459 }
1460
Jani Nikula344c5bb2014-09-09 11:25:13 +03001461 /*
1462 * Use the maximum clock and number of lanes the eDP panel
1463	 * advertises being capable of. The panels are generally
1464 * designed to support only a single clock and lane
1465 * configuration, and typically these values correspond to the
1466 * native resolution of the panel.
1467 */
1468 min_lane_count = max_lane_count;
1469 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001470 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001471
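	/*
	 * Added note on the search order below: bpp starts at the pipe's
	 * maximum and drops by 2 bits per channel (6 per pixel) each pass,
	 * while link clock and lane count grow upwards from their minima,
	 * so the first combination that fits keeps the deepest colour depth
	 * at the lowest link clock and fewest lanes that can carry it.
	 */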
Daniel Vetter36008362013-03-27 00:44:59 +01001472 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001473 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1474 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001475
Dave Airliec6930992014-07-14 11:04:39 +10001476 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301477 for (lane_count = min_lane_count;
1478 lane_count <= max_lane_count;
1479 lane_count <<= 1) {
1480
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001481 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001482 link_avail = intel_dp_max_data_rate(link_clock,
1483 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001484
Daniel Vetter36008362013-03-27 00:44:59 +01001485 if (mode_rate <= link_avail) {
1486 goto found;
1487 }
1488 }
1489 }
1490 }
1491
1492 return false;
1493
1494found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001495 if (intel_dp->color_range_auto) {
1496 /*
1497 * See:
1498 * CEA-861-E - 5.1 Default Encoding Parameters
1499 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1500 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001501 pipe_config->limited_color_range =
1502 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1503 } else {
1504 pipe_config->limited_color_range =
1505 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001506 }
1507
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001508 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301509
Daniel Vetter657445f2013-05-04 10:09:18 +02001510 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001511 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001512
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001513 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1514 &link_bw, &rate_select);
1515
1516 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1517 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001518 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001519 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1520 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001521
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001522 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001523 adjusted_mode->crtc_clock,
1524 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001525 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001526
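	/*
	 * Added note: the M/N pair computed above encodes the ratio between
	 * the pixel data rate (crtc_clock * bpp) and the link bandwidth
	 * (port_clock * lane_count), which the hardware uses to pace pixel
	 * data over the link; the DRRS branch below derives a second pair
	 * for the downclocked panel mode.
	 */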
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301527 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301528 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001529 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301530 intel_link_compute_m_n(bpp, lane_count,
1531 intel_connector->panel.downclock_mode->clock,
1532 pipe_config->port_clock,
1533 &pipe_config->dp_m2_n2);
1534 }
1535
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001536 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001537 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301538 else if (IS_BROXTON(dev))
1539 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001540 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001541 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001542 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001543 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001544
Daniel Vetter36008362013-03-27 00:44:59 +01001545 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001546}
1547
Daniel Vetter7c62a162013-06-01 17:16:20 +02001548static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001549{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001550 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1551 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1552 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001553 struct drm_i915_private *dev_priv = dev->dev_private;
1554 u32 dpa_ctl;
1555
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001556 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1557 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001558 dpa_ctl = I915_READ(DP_A);
1559 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1560
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001561 if (crtc->config->port_clock == 162000) {
Daniel Vetter1ce17032012-11-29 15:59:32 +01001562		/* For a long time we've carried around an ILK-DevA w/a for the
1563 * 160MHz clock. If we're really unlucky, it's still required.
1564 */
1565 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
Daniel Vetterea9b6002012-11-29 15:59:31 +01001566 dpa_ctl |= DP_PLL_FREQ_160MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001567 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001568 } else {
1569 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001570 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001571 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001572
Daniel Vetterea9b6002012-11-29 15:59:31 +01001573 I915_WRITE(DP_A, dpa_ctl);
1574
1575 POSTING_READ(DP_A);
1576 udelay(500);
1577}
1578
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001579void intel_dp_set_link_params(struct intel_dp *intel_dp,
1580 const struct intel_crtc_state *pipe_config)
1581{
1582 intel_dp->link_rate = pipe_config->port_clock;
1583 intel_dp->lane_count = pipe_config->lane_count;
1584}
1585
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001586static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001587{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001588 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001589 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001590 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001591 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001592 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03001593 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001594
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001595 intel_dp_set_link_params(intel_dp, crtc->config);
1596
Keith Packard417e8222011-11-01 19:54:11 -07001597 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001598 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001599 *
1600 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001601 * SNB CPU
1602 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001603 * CPT PCH
1604 *
1605 * IBX PCH and CPU are the same for almost everything,
1606 * except that the CPU DP PLL is configured in this
1607 * register
1608 *
1609 * CPT PCH is quite different, having many bits moved
1610 * to the TRANS_DP_CTL register instead. That
1611 * configuration happens (oddly) in ironlake_pch_enable
1612 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001613
Keith Packard417e8222011-11-01 19:54:11 -07001614 /* Preserve the BIOS-computed detected bit. This is
1615 * supposed to be read-only.
1616 */
1617 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001618
Keith Packard417e8222011-11-01 19:54:11 -07001619 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001620 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001621 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001622
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001623 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001624 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001625
Keith Packard417e8222011-11-01 19:54:11 -07001626 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001627
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001628 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001629 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1630 intel_dp->DP |= DP_SYNC_HS_HIGH;
1631 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1632 intel_dp->DP |= DP_SYNC_VS_HIGH;
1633 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1634
Jani Nikula6aba5b62013-10-04 15:08:10 +03001635 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001636 intel_dp->DP |= DP_ENHANCED_FRAMING;
1637
Daniel Vetter7c62a162013-06-01 17:16:20 +02001638 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001639 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001640 u32 trans_dp;
1641
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001642 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001643
1644 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1645 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1646 trans_dp |= TRANS_DP_ENH_FRAMING;
1647 else
1648 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1649 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001650 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001651 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1652 crtc->config->limited_color_range)
1653 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001654
1655 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1656 intel_dp->DP |= DP_SYNC_HS_HIGH;
1657 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1658 intel_dp->DP |= DP_SYNC_VS_HIGH;
1659 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1660
Jani Nikula6aba5b62013-10-04 15:08:10 +03001661 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001662 intel_dp->DP |= DP_ENHANCED_FRAMING;
1663
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001664 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001665 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001666 else if (crtc->pipe == PIPE_B)
1667 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001668 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001669}
1670
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001671#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1672#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001673
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001674#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1675#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001676
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001677#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1678#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001679
Daniel Vetter4be73782014-01-17 14:39:48 +01001680static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001681 u32 mask,
1682 u32 value)
1683{
Paulo Zanoni30add222012-10-26 19:05:45 -02001684 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001685 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001686 u32 pp_stat_reg, pp_ctrl_reg;
1687
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
Jani Nikulabf13e812013-09-06 07:40:05 +03001690 pp_stat_reg = _pp_stat_reg(intel_dp);
1691 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001692
1693 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001694 mask, value,
1695 I915_READ(pp_stat_reg),
1696 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001697
Jesse Barnes453c5422013-03-28 09:55:41 -07001698 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001699 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001700 I915_READ(pp_stat_reg),
1701 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001702 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001703
1704 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001705}
1706
Daniel Vetter4be73782014-01-17 14:39:48 +01001707static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001708{
1709 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001710 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001711}
1712
Daniel Vetter4be73782014-01-17 14:39:48 +01001713static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001714{
Keith Packardbd943152011-09-18 23:09:52 -07001715 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001716 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001717}
Keith Packardbd943152011-09-18 23:09:52 -07001718
Daniel Vetter4be73782014-01-17 14:39:48 +01001719static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001720{
1721 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001722
1723 /* When we disable the VDD override bit last we have to do the manual
1724 * wait. */
1725 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1726 intel_dp->panel_power_cycle_delay);
1727
Daniel Vetter4be73782014-01-17 14:39:48 +01001728 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001729}
Keith Packardbd943152011-09-18 23:09:52 -07001730
Daniel Vetter4be73782014-01-17 14:39:48 +01001731static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001732{
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1734 intel_dp->backlight_on_delay);
1735}
1736
Daniel Vetter4be73782014-01-17 14:39:48 +01001737static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001738{
1739 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1740 intel_dp->backlight_off_delay);
1741}
Keith Packard99ea7122011-11-01 19:57:50 -07001742
Keith Packard832dd3c2011-11-01 19:34:06 -07001743/* Read the current pp_control value, unlocking the register if it
1744 * is locked
1745 */
1746
Jesse Barnes453c5422013-03-28 09:55:41 -07001747static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001748{
Jesse Barnes453c5422013-03-28 09:55:41 -07001749 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001752
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001753 lockdep_assert_held(&dev_priv->pps_mutex);
1754
Jani Nikulabf13e812013-09-06 07:40:05 +03001755 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301756 if (!IS_BROXTON(dev)) {
1757 control &= ~PANEL_UNLOCK_MASK;
1758 control |= PANEL_UNLOCK_REGS;
1759 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001760 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001761}
1762
Ville Syrjälä951468f2014-09-04 14:55:31 +03001763/*
1764 * Must be paired with edp_panel_vdd_off().
1765 * Must hold pps_mutex around the whole on/off sequence.
1766 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1767 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001768static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001769{
Paulo Zanoni30add222012-10-26 19:05:45 -02001770 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1772 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001773 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001774 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001775 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001776 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001777 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001778
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001779 lockdep_assert_held(&dev_priv->pps_mutex);
1780
Keith Packard97af61f572011-09-28 16:23:51 -07001781 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001782 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001783
Egbert Eich2c623c12014-11-25 12:54:57 +01001784 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001785 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001786
Daniel Vetter4be73782014-01-17 14:39:48 +01001787 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001788 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001789
Imre Deak4e6e1a52014-03-27 17:45:11 +02001790 power_domain = intel_display_port_power_domain(intel_encoder);
1791 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001792
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001793 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1794 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001795
Daniel Vetter4be73782014-01-17 14:39:48 +01001796 if (!edp_have_panel_power(intel_dp))
1797 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001798
Jesse Barnes453c5422013-03-28 09:55:41 -07001799 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001800 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001801
Jani Nikulabf13e812013-09-06 07:40:05 +03001802 pp_stat_reg = _pp_stat_reg(intel_dp);
1803 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001804
1805 I915_WRITE(pp_ctrl_reg, pp);
1806 POSTING_READ(pp_ctrl_reg);
1807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1808 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001809 /*
1810 * If the panel wasn't on, delay before accessing aux channel
1811 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001812 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001813 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1814 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001815 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001816 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001817
1818 return need_to_disable;
1819}
1820
Ville Syrjälä951468f2014-09-04 14:55:31 +03001821/*
1822 * Must be paired with intel_edp_panel_vdd_off() or
1823 * intel_edp_panel_off().
1824 * Nested calls to these functions are not allowed since
1825 * we drop the lock. Caller must use some higher level
1826 * locking to prevent nested calls from other threads.
1827 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001828void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001829{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001830 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001831
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001832 if (!is_edp(intel_dp))
1833 return;
1834
Ville Syrjälä773538e82014-09-04 14:54:56 +03001835 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001836 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001837 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001838
Rob Clarke2c719b2014-12-15 13:56:32 -05001839 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001840 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001841}
1842
Daniel Vetter4be73782014-01-17 14:39:48 +01001843static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001844{
Paulo Zanoni30add222012-10-26 19:05:45 -02001845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001846 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001847 struct intel_digital_port *intel_dig_port =
1848 dp_to_dig_port(intel_dp);
1849 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1850 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001851 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001852 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001853
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001854 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001855
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001856 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001857
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001858 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001859 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001860
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001861 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1862 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001863
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001864 pp = ironlake_get_pp_control(intel_dp);
1865 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001866
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001867 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1868 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001869
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001872
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001873 /* Make sure sequencer is idle before allowing subsequent activity */
1874 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1875 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001876
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001877 if ((pp & POWER_TARGET_ON) == 0)
1878 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001879
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001880 power_domain = intel_display_port_power_domain(intel_encoder);
1881 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001882}
1883
Daniel Vetter4be73782014-01-17 14:39:48 +01001884static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001885{
1886 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1887 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001888
Ville Syrjälä773538e82014-09-04 14:54:56 +03001889 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001890 if (!intel_dp->want_panel_vdd)
1891 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001892 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001893}
1894
Imre Deakaba86892014-07-30 15:57:31 +03001895static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1896{
1897 unsigned long delay;
1898
1899 /*
1900 * Queue the timer to fire a long time from now (relative to the power
1901 * down delay) to keep the panel power up across a sequence of
1902 * operations.
1903 */
1904 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1905 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1906}
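/*
 * Added example: with a panel_power_cycle_delay of, say, 500ms the work is
 * queued roughly 2.5 seconds out, so a burst of back-to-back AUX accesses
 * can ride on a single VDD-on period instead of cycling VDD each time.
 */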
1907
Ville Syrjälä951468f2014-09-04 14:55:31 +03001908/*
1909 * Must be paired with edp_panel_vdd_on().
1910 * Must hold pps_mutex around the whole on/off sequence.
1911 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1912 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001913static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001914{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001915 struct drm_i915_private *dev_priv =
1916 intel_dp_to_dev(intel_dp)->dev_private;
1917
1918 lockdep_assert_held(&dev_priv->pps_mutex);
1919
Keith Packard97af61f572011-09-28 16:23:51 -07001920 if (!is_edp(intel_dp))
1921 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001922
Rob Clarke2c719b2014-12-15 13:56:32 -05001923 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001924 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001925
Keith Packardbd943152011-09-18 23:09:52 -07001926 intel_dp->want_panel_vdd = false;
1927
Imre Deakaba86892014-07-30 15:57:31 +03001928 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001929 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001930 else
1931 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001932}
1933
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001934static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001935{
Paulo Zanoni30add222012-10-26 19:05:45 -02001936 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001937 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001938 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001939 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001940
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001941 lockdep_assert_held(&dev_priv->pps_mutex);
1942
Keith Packard97af61f572011-09-28 16:23:51 -07001943 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001944 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001945
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001946 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001948
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001949 if (WARN(edp_have_panel_power(intel_dp),
1950 "eDP port %c panel power already on\n",
1951 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001952 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001953
Daniel Vetter4be73782014-01-17 14:39:48 +01001954 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001955
Jani Nikulabf13e812013-09-06 07:40:05 +03001956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001957 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001958 if (IS_GEN5(dev)) {
1959 /* ILK workaround: disable reset around power sequence */
1960 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001961 I915_WRITE(pp_ctrl_reg, pp);
1962 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001963 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001964
Keith Packard1c0ae802011-09-19 13:59:29 -07001965 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001966 if (!IS_GEN5(dev))
1967 pp |= PANEL_POWER_RESET;
1968
Jesse Barnes453c5422013-03-28 09:55:41 -07001969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001971
Daniel Vetter4be73782014-01-17 14:39:48 +01001972 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001973 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001974
Keith Packard05ce1a42011-09-29 16:33:01 -07001975 if (IS_GEN5(dev)) {
1976 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001979 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001980}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001981
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001982void intel_edp_panel_on(struct intel_dp *intel_dp)
1983{
1984 if (!is_edp(intel_dp))
1985 return;
1986
1987 pps_lock(intel_dp);
1988 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001989 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001990}
1991
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001992
1993static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001994{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001995 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1996 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001997 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001998 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001999 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002000 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002001 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002002
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002003 lockdep_assert_held(&dev_priv->pps_mutex);
2004
Keith Packard97af61f572011-09-28 16:23:51 -07002005 if (!is_edp(intel_dp))
2006 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002007
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002008 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2009 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002010
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002011 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2012 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002013
Jesse Barnes453c5422013-03-28 09:55:41 -07002014 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002015 /* We need to switch off panel power _and_ force vdd, for otherwise some
2016 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002017 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2018 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002019
Jani Nikulabf13e812013-09-06 07:40:05 +03002020 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002021
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002022 intel_dp->want_panel_vdd = false;
2023
Jesse Barnes453c5422013-03-28 09:55:41 -07002024 I915_WRITE(pp_ctrl_reg, pp);
2025 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002026
Paulo Zanonidce56b32013-12-19 14:29:40 -02002027 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002028 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002029
2030 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002031 power_domain = intel_display_port_power_domain(intel_encoder);
2032 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002033}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002034
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002035void intel_edp_panel_off(struct intel_dp *intel_dp)
2036{
2037 if (!is_edp(intel_dp))
2038 return;
2039
2040 pps_lock(intel_dp);
2041 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002042 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002043}
2044
Jani Nikula1250d102014-08-12 17:11:39 +03002045/* Enable backlight in the panel power control. */
2046static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002047{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002048 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2049 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002050 struct drm_i915_private *dev_priv = dev->dev_private;
2051 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002052 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002053
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002054 /*
2055 * If we enable the backlight right away following a panel power
2056 * on, we may see slight flicker as the panel syncs with the eDP
2057 * link. So delay a bit to make sure the image is solid before
2058 * allowing it to appear.
2059 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002060 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002061
Ville Syrjälä773538e82014-09-04 14:54:56 +03002062 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002063
Jesse Barnes453c5422013-03-28 09:55:41 -07002064 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002065 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002066
Jani Nikulabf13e812013-09-06 07:40:05 +03002067 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002068
2069 I915_WRITE(pp_ctrl_reg, pp);
2070 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002071
Ville Syrjälä773538e82014-09-04 14:54:56 +03002072 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002073}
2074
Jani Nikula1250d102014-08-12 17:11:39 +03002075/* Enable backlight PWM and backlight PP control. */
2076void intel_edp_backlight_on(struct intel_dp *intel_dp)
2077{
2078 if (!is_edp(intel_dp))
2079 return;
2080
2081 DRM_DEBUG_KMS("\n");
2082
2083 intel_panel_enable_backlight(intel_dp->attached_connector);
2084 _intel_edp_backlight_on(intel_dp);
2085}
2086
2087/* Disable backlight in the panel power control. */
2088static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002089{
Paulo Zanoni30add222012-10-26 19:05:45 -02002090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002093 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002094
Keith Packardf01eca22011-09-28 16:48:10 -07002095 if (!is_edp(intel_dp))
2096 return;
2097
Ville Syrjälä773538e82014-09-04 14:54:56 +03002098 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002099
Jesse Barnes453c5422013-03-28 09:55:41 -07002100 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002101 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002102
Jani Nikulabf13e812013-09-06 07:40:05 +03002103 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002104
2105 I915_WRITE(pp_ctrl_reg, pp);
2106 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002107
Ville Syrjälä773538e82014-09-04 14:54:56 +03002108 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002109
Paulo Zanonidce56b32013-12-19 14:29:40 -02002110 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002111 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002112}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002113
Jani Nikula1250d102014-08-12 17:11:39 +03002114/* Disable backlight PP control and backlight PWM. */
2115void intel_edp_backlight_off(struct intel_dp *intel_dp)
2116{
2117 if (!is_edp(intel_dp))
2118 return;
2119
2120 DRM_DEBUG_KMS("\n");
2121
2122 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002123 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002124}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002125
Jani Nikula73580fb72014-08-12 17:11:41 +03002126/*
2127 * Hook for controlling the panel power control backlight through the bl_power
2128 * sysfs attribute. Take care to handle multiple calls.
2129 */
2130static void intel_edp_backlight_power(struct intel_connector *connector,
2131 bool enable)
2132{
2133 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002134 bool is_enabled;
2135
Ville Syrjälä773538e82014-09-04 14:54:56 +03002136 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002137 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002138 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002139
2140 if (is_enabled == enable)
2141 return;
2142
Jani Nikula23ba9372014-08-27 14:08:43 +03002143 DRM_DEBUG_KMS("panel power control backlight %s\n",
2144 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002145
2146 if (enable)
2147 _intel_edp_backlight_on(intel_dp);
2148 else
2149 _intel_edp_backlight_off(intel_dp);
2150}
2151
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002152static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002153{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2155 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2156 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 u32 dpa_ctl;
2159
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002160 assert_pipe_disabled(dev_priv,
2161 to_intel_crtc(crtc)->pipe);
2162
Jesse Barnesd240f202010-08-13 15:43:26 -07002163 DRM_DEBUG_KMS("\n");
2164 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002165 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2166 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2167
2168 /* We don't adjust intel_dp->DP while tearing down the link, to
2169 * facilitate link retraining (e.g. after hotplug). Hence clear all
2170 * enable bits here to ensure that we don't enable too much. */
2171 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2172 intel_dp->DP |= DP_PLL_ENABLE;
2173 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002174 POSTING_READ(DP_A);
2175 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002176}
2177
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002178static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002179{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002180 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2181 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2182 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002183 struct drm_i915_private *dev_priv = dev->dev_private;
2184 u32 dpa_ctl;
2185
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002186 assert_pipe_disabled(dev_priv,
2187 to_intel_crtc(crtc)->pipe);
2188
Jesse Barnesd240f202010-08-13 15:43:26 -07002189 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002190 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2191 "dp pll off, should be on\n");
2192 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2193
2194 /* We can't rely on the value tracked for the DP register in
2195 * intel_dp->DP because link_down must not change that (otherwise link
2196	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002197 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002198 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002199 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002200 udelay(200);
2201}
2202
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002203/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002204void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002205{
2206 int ret, i;
2207
2208 /* Should have a valid DPCD by this point */
2209 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2210 return;
2211
2212 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002213 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2214 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002215 } else {
2216 /*
2217 * When turning on, we need to retry for 1ms to give the sink
2218 * time to wake up.
2219 */
2220 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002223 if (ret == 1)
2224 break;
2225 msleep(1);
2226 }
2227 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002228
2229 if (ret != 1)
2230 DRM_DEBUG_KMS("failed to %s sink power state\n",
2231 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002232}
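/*
 * Added note: DP_SET_POWER is the sink's DPCD power-state register (0x600);
 * the sink may need up to 1ms to come back from D3, which is why the D0
 * write above is retried up to three times with a 1ms sleep in between.
 */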
2233
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002234static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2235 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002236{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002237 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002238 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002239 struct drm_device *dev = encoder->base.dev;
2240 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002241 enum intel_display_power_domain power_domain;
2242 u32 tmp;
2243
2244 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002245 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002246 return false;
2247
2248 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002249
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002250 if (!(tmp & DP_PORT_EN))
2251 return false;
2252
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002253 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002254 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002255 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002256 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002257
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002258 for_each_pipe(dev_priv, p) {
2259 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2260 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2261 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002262 return true;
2263 }
2264 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002265
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002266 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2267 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002268 } else if (IS_CHERRYVIEW(dev)) {
2269 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2270 } else {
2271 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002272 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002273
2274 return true;
2275}
2276
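/*
 * Reconstruct the pipe configuration (sync polarity, lane count, port
 * clock, audio, bpp, ...) from the current DP port register state.
 */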
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002277static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002278 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002279{
2280 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002281 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002282 struct drm_device *dev = encoder->base.dev;
2283 struct drm_i915_private *dev_priv = dev->dev_private;
2284 enum port port = dp_to_dig_port(intel_dp)->port;
2285 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002286 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002287
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002288 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002289
2290 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002291
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002292 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002293 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2294
2295 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002296 flags |= DRM_MODE_FLAG_PHSYNC;
2297 else
2298 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002299
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002300 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002301 flags |= DRM_MODE_FLAG_PVSYNC;
2302 else
2303 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002304 } else {
2305 if (tmp & DP_SYNC_HS_HIGH)
2306 flags |= DRM_MODE_FLAG_PHSYNC;
2307 else
2308 flags |= DRM_MODE_FLAG_NHSYNC;
2309
2310 if (tmp & DP_SYNC_VS_HIGH)
2311 flags |= DRM_MODE_FLAG_PVSYNC;
2312 else
2313 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002314 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002315
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002316 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002317
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002318 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2319 tmp & DP_COLOR_RANGE_16_235)
2320 pipe_config->limited_color_range = true;
2321
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002322 pipe_config->has_dp_encoder = true;
2323
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002324 pipe_config->lane_count =
2325 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2326
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002327 intel_dp_get_m_n(crtc, pipe_config);
2328
Ville Syrjälä18442d02013-09-13 16:00:08 +03002329 if (port == PORT_A) {
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002330 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2331 pipe_config->port_clock = 162000;
2332 else
2333 pipe_config->port_clock = 270000;
2334 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002335
2336 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2337 &pipe_config->dp_m_n);
2338
2339 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2340 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2341
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002342 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002343
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002344 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2345 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2346 /*
2347 * This is a big fat ugly hack.
2348 *
2349 * Some machines in UEFI boot mode provide us a VBT that has 18
2350 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2351 * unknown we fail to light up. Yet the same BIOS boots up with
2352 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2353 * max, not what it tells us to use.
2354 *
2355 * Note: This will still be broken if the eDP panel is not lit
2356 * up by the BIOS, and thus we can't get the mode at module
2357 * load.
2358 */
2359 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2360 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2361 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2362 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002363}
2364
Daniel Vettere8cb4552012-07-01 13:05:48 +02002365static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002366{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002367 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002368 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002369 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2370
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002371 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002372 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002373
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002374 if (HAS_PSR(dev) && !HAS_DDI(dev))
2375 intel_psr_disable(intel_dp);
2376
Daniel Vetter6cb49832012-05-20 17:14:50 +02002377 /* Make sure the panel is off before trying to change the mode. But also
2378 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002379 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002380 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002381 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002382 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002383
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002384 /* disable the port before the pipe on g4x */
2385 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002386 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002387}
2388
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002389static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002390{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002391 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002392 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002393
Ville Syrjälä49277c32014-03-31 18:21:26 +03002394 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002395 if (port == PORT_A)
2396 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002397}
2398
2399static void vlv_post_disable_dp(struct intel_encoder *encoder)
2400{
2401 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2402
2403 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002404}
2405
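/*
 * Assert (reset == true) or deassert the TX lane and PCS clock soft resets
 * in the CHV DPIO PHY: always for the PCS01 group, and also for PCS23 when
 * more than two lanes are in use.
 */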
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002406static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2407 bool reset)
2408{
2409 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2410 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2411 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2412 enum pipe pipe = crtc->pipe;
2413 uint32_t val;
2414
2415 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2416 if (reset)
2417 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2418 else
2419 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2420 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2421
2422 if (crtc->config->lane_count > 2) {
2423 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2424 if (reset)
2425 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2426 else
2427 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2428 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2429 }
2430
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2432 val |= CHV_PCS_REQ_SOFTRESET_EN;
2433 if (reset)
2434 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2435 else
2436 val |= DPIO_PCS_CLK_SOFT_RESET;
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2438
2439 if (crtc->config->lane_count > 2) {
2440 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2441 val |= CHV_PCS_REQ_SOFTRESET_EN;
2442 if (reset)
2443 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2444 else
2445 val |= DPIO_PCS_CLK_SOFT_RESET;
2446 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2447 }
2448}
2449
Ville Syrjälä580d3812014-04-09 13:29:00 +03002450static void chv_post_disable_dp(struct intel_encoder *encoder)
2451{
2452 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002453 struct drm_device *dev = encoder->base.dev;
2454 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002455
2456 intel_dp_link_down(intel_dp);
2457
Ville Syrjäläa5805162015-05-26 20:42:30 +03002458 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002459
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002460 /* Assert data lane reset */
2461 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002462
Ville Syrjäläa5805162015-05-26 20:42:30 +03002463 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002464}
2465
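/*
 * Translate the requested DPCD training pattern into the source-side
 * register bits: written to DP_TP_CTL directly on DDI platforms, otherwise
 * merged into the port's DP register value via *DP for the caller to write.
 */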
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002466static void
2467_intel_dp_set_link_train(struct intel_dp *intel_dp,
2468 uint32_t *DP,
2469 uint8_t dp_train_pat)
2470{
2471 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2472 struct drm_device *dev = intel_dig_port->base.base.dev;
2473 struct drm_i915_private *dev_priv = dev->dev_private;
2474 enum port port = intel_dig_port->port;
2475
2476 if (HAS_DDI(dev)) {
2477 uint32_t temp = I915_READ(DP_TP_CTL(port));
2478
2479 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2480 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2481 else
2482 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2483
2484 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2485 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2486 case DP_TRAINING_PATTERN_DISABLE:
2487 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2488
2489 break;
2490 case DP_TRAINING_PATTERN_1:
2491 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2492 break;
2493 case DP_TRAINING_PATTERN_2:
2494 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2495 break;
2496 case DP_TRAINING_PATTERN_3:
2497 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2498 break;
2499 }
2500 I915_WRITE(DP_TP_CTL(port), temp);
2501
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002502 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2503 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002504 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2505
2506 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2507 case DP_TRAINING_PATTERN_DISABLE:
2508 *DP |= DP_LINK_TRAIN_OFF_CPT;
2509 break;
2510 case DP_TRAINING_PATTERN_1:
2511 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2512 break;
2513 case DP_TRAINING_PATTERN_2:
2514 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2515 break;
2516 case DP_TRAINING_PATTERN_3:
2517 DRM_ERROR("DP training pattern 3 not supported\n");
2518 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2519 break;
2520 }
2521
2522 } else {
2523 if (IS_CHERRYVIEW(dev))
2524 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2525 else
2526 *DP &= ~DP_LINK_TRAIN_MASK;
2527
2528 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2529 case DP_TRAINING_PATTERN_DISABLE:
2530 *DP |= DP_LINK_TRAIN_OFF;
2531 break;
2532 case DP_TRAINING_PATTERN_1:
2533 *DP |= DP_LINK_TRAIN_PAT_1;
2534 break;
2535 case DP_TRAINING_PATTERN_2:
2536 *DP |= DP_LINK_TRAIN_PAT_2;
2537 break;
2538 case DP_TRAINING_PATTERN_3:
2539 if (IS_CHERRYVIEW(dev)) {
2540 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2541 } else {
2542 DRM_ERROR("DP training pattern 3 not supported\n");
2543 *DP |= DP_LINK_TRAIN_PAT_2;
2544 }
2545 break;
2546 }
2547 }
2548}
2549
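/*
 * Turn the port on with training pattern 1 selected; the actual link
 * training sequence is started later from intel_enable_dp().
 */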
2550static void intel_dp_enable_port(struct intel_dp *intel_dp)
2551{
2552 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002555 /* enable with pattern 1 (as per spec) */
2556 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2557 DP_TRAINING_PATTERN_1);
2558
2559 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2560 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002561
2562 /*
2563 * Magic for VLV/CHV. We _must_ first set up the register
2564 * without actually enabling the port, and then do another
2565 * write to enable the port. Otherwise link training will
2566 * fail when the power sequencer is freshly used for this port.
2567 */
2568 intel_dp->DP |= DP_PORT_EN;
2569
2570 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2571 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002572}
2573
Daniel Vettere8cb4552012-07-01 13:05:48 +02002574static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002575{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002576 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2577 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002578 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002579 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002580 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002581 enum port port = dp_to_dig_port(intel_dp)->port;
2582 enum pipe pipe = crtc->pipe;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002583
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002584 if (WARN_ON(dp_reg & DP_PORT_EN))
2585 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002586
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002587 pps_lock(intel_dp);
2588
2589 if (IS_VALLEYVIEW(dev))
2590 vlv_init_panel_power_sequencer(intel_dp);
2591
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002592 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002593
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002594 if (port == PORT_A && IS_GEN5(dev_priv)) {
2595 /*
2596 * Underrun reporting for the other pipe was disabled in
2597 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2598 * enabled, so it's now safe to re-enable underrun reporting.
2599 */
2600 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2601 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2602 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2603 }
2604
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002605 edp_panel_vdd_on(intel_dp);
2606 edp_panel_on(intel_dp);
2607 edp_panel_vdd_off(intel_dp, true);
2608
2609 pps_unlock(intel_dp);
2610
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002611 if (IS_VALLEYVIEW(dev)) {
2612 unsigned int lane_mask = 0x0;
2613
2614 if (IS_CHERRYVIEW(dev))
2615 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2616
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002617 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2618 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002619 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002620
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002621 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2622 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002623 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002624
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002625 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002626 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002627 pipe_name(pipe));
Jani Nikulac1dec792014-10-27 16:26:56 +02002628 intel_audio_codec_enable(encoder);
2629 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002630}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002631
Jani Nikulaecff4f32013-09-06 07:38:29 +03002632static void g4x_enable_dp(struct intel_encoder *encoder)
2633{
Jani Nikula828f5c62013-09-05 16:44:45 +03002634 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2635
Jani Nikulaecff4f32013-09-06 07:38:29 +03002636 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002637 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002638}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002639
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002640static void vlv_enable_dp(struct intel_encoder *encoder)
2641{
Jani Nikula828f5c62013-09-05 16:44:45 +03002642 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2643
Daniel Vetter4be73782014-01-17 14:39:48 +01002644 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002645 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002646}
2647
Jani Nikulaecff4f32013-09-06 07:38:29 +03002648static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002649{
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002650 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002651 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002652 enum port port = dp_to_dig_port(intel_dp)->port;
2653 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002654
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002655 intel_dp_prepare(encoder);
2656
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002657 if (port == PORT_A && IS_GEN5(dev_priv)) {
2658 /*
2659 * We get FIFO underruns on the other pipe when
2660 * enabling the CPU eDP PLL, and when enabling CPU
2661 * eDP port. We could potentially avoid the PLL
2662 * underrun with a vblank wait just prior to enabling
2663 * the PLL, but that doesn't appear to help the port
2664 * enable case. Just sweep it all under the rug.
2665 */
2666 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2667 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2668 }
2669
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002670 /* Only ilk+ has port A */
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002671 if (port == PORT_A) {
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002672 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002673 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002674 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002675}
2676
Ville Syrjälä83b84592014-10-16 21:29:51 +03002677static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2678{
2679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2680 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2681 enum pipe pipe = intel_dp->pps_pipe;
2682 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2683
2684 edp_panel_vdd_off_sync(intel_dp);
2685
2686 /*
 2687 * VLV seems to get confused when multiple power sequencers
 2688 * have the same port selected (even if only one has power/vdd
 2689 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2690 * CHV on the other hand doesn't seem to mind having the same port
 2691 * selected in multiple power sequencers, but let's clear the
2692 * port select always when logically disconnecting a power sequencer
2693 * from a port.
2694 */
2695 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2696 pipe_name(pipe), port_name(intel_dig_port->port));
2697 I915_WRITE(pp_on_reg, 0);
2698 POSTING_READ(pp_on_reg);
2699
2700 intel_dp->pps_pipe = INVALID_PIPE;
2701}
2702
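/*
 * Detach the power sequencer of @pipe from whichever eDP port currently
 * claims it so that the caller can take it over. Caller must hold pps_mutex.
 */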
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002703static void vlv_steal_power_sequencer(struct drm_device *dev,
2704 enum pipe pipe)
2705{
2706 struct drm_i915_private *dev_priv = dev->dev_private;
2707 struct intel_encoder *encoder;
2708
2709 lockdep_assert_held(&dev_priv->pps_mutex);
2710
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002711 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2712 return;
2713
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002714 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2715 base.head) {
2716 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002717 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002718
2719 if (encoder->type != INTEL_OUTPUT_EDP)
2720 continue;
2721
2722 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002723 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002724
2725 if (intel_dp->pps_pipe != pipe)
2726 continue;
2727
2728 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002729 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002730
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002731 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002732 "stealing pipe %c power sequencer from active eDP port %c\n",
2733 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002734
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002735 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002736 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002737 }
2738}
2739
2740static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2741{
2742 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2743 struct intel_encoder *encoder = &intel_dig_port->base;
2744 struct drm_device *dev = encoder->base.dev;
2745 struct drm_i915_private *dev_priv = dev->dev_private;
2746 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002747
2748 lockdep_assert_held(&dev_priv->pps_mutex);
2749
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002750 if (!is_edp(intel_dp))
2751 return;
2752
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002753 if (intel_dp->pps_pipe == crtc->pipe)
2754 return;
2755
2756 /*
2757 * If another power sequencer was being used on this
 2758 * port previously, make sure to turn off vdd there while
2759 * we still have control of it.
2760 */
2761 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002762 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002763
2764 /*
2765 * We may be stealing the power
2766 * sequencer from another port.
2767 */
2768 vlv_steal_power_sequencer(dev, crtc->pipe);
2769
2770 /* now it's all ours */
2771 intel_dp->pps_pipe = crtc->pipe;
2772
2773 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2774 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2775
2776 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002777 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2778 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002779}
2780
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002781static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2782{
2783 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2784 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002785 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002786 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002787 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002788 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002789 int pipe = intel_crtc->pipe;
2790 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002791
Ville Syrjäläa5805162015-05-26 20:42:30 +03002792 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002793
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002794 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002795 val = 0;
2796 if (pipe)
2797 val |= (1<<21);
2798 else
2799 val &= ~(1<<21);
2800 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002801 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2802 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002804
Ville Syrjäläa5805162015-05-26 20:42:30 +03002805 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002806
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002807 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002808}
2809
Jani Nikulaecff4f32013-09-06 07:38:29 +03002810static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002811{
2812 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2813 struct drm_device *dev = encoder->base.dev;
2814 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002815 struct intel_crtc *intel_crtc =
2816 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002817 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002818 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002819
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002820 intel_dp_prepare(encoder);
2821
Jesse Barnes89b667f2013-04-18 14:51:36 -07002822 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002823 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002824 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002825 DPIO_PCS_TX_LANE2_RESET |
2826 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002827 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002828 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2829 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2830 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2831 DPIO_PCS_CLK_SOFT_RESET);
2832
2833 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2835 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2836 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002837 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002838}
2839
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002840static void chv_pre_enable_dp(struct intel_encoder *encoder)
2841{
2842 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2843 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2844 struct drm_device *dev = encoder->base.dev;
2845 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002846 struct intel_crtc *intel_crtc =
2847 to_intel_crtc(encoder->base.crtc);
2848 enum dpio_channel ch = vlv_dport_to_channel(dport);
2849 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002850 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002851 u32 val;
2852
Ville Syrjäläa5805162015-05-26 20:42:30 +03002853 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002854
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002855 /* allow hardware to manage TX FIFO reset source */
2856 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2857 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2859
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002860 if (intel_crtc->config->lane_count > 2) {
2861 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2862 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2863 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2864 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002865
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002866 /* Program Tx lane latency optimal setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002867 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002868 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002869 if (intel_crtc->config->lane_count == 1)
2870 data = 0x0;
2871 else
2872 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002873 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2874 data << DPIO_UPAR_SHIFT);
2875 }
2876
2877 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002878 if (intel_crtc->config->port_clock > 270000)
2879 stagger = 0x18;
2880 else if (intel_crtc->config->port_clock > 135000)
2881 stagger = 0xd;
2882 else if (intel_crtc->config->port_clock > 67500)
2883 stagger = 0x7;
2884 else if (intel_crtc->config->port_clock > 33750)
2885 stagger = 0x4;
2886 else
2887 stagger = 0x2;
2888
2889 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2890 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2891 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2892
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002893 if (intel_crtc->config->lane_count > 2) {
2894 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2895 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2897 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002898
2899 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2900 DPIO_LANESTAGGER_STRAP(stagger) |
2901 DPIO_LANESTAGGER_STRAP_OVRD |
2902 DPIO_TX1_STAGGER_MASK(0x1f) |
2903 DPIO_TX1_STAGGER_MULT(6) |
2904 DPIO_TX2_STAGGER_MULT(0));
2905
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002906 if (intel_crtc->config->lane_count > 2) {
2907 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2908 DPIO_LANESTAGGER_STRAP(stagger) |
2909 DPIO_LANESTAGGER_STRAP_OVRD |
2910 DPIO_TX1_STAGGER_MASK(0x1f) |
2911 DPIO_TX1_STAGGER_MULT(7) |
2912 DPIO_TX2_STAGGER_MULT(5));
2913 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002914
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002915 /* Deassert data lane reset */
2916 chv_data_lane_soft_reset(encoder, false);
2917
Ville Syrjäläa5805162015-05-26 20:42:30 +03002918 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002919
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002920 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002921
2922 /* Second common lane will stay alive on its own now */
2923 if (dport->release_cl2_override) {
2924 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2925 dport->release_cl2_override = false;
2926 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002927}
2928
Ville Syrjälä9197c882014-04-09 13:29:05 +03002929static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2930{
2931 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2932 struct drm_device *dev = encoder->base.dev;
2933 struct drm_i915_private *dev_priv = dev->dev_private;
2934 struct intel_crtc *intel_crtc =
2935 to_intel_crtc(encoder->base.crtc);
2936 enum dpio_channel ch = vlv_dport_to_channel(dport);
2937 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002938 unsigned int lane_mask =
2939 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002940 u32 val;
2941
Ville Syrjälä625695f2014-06-28 02:04:02 +03002942 intel_dp_prepare(encoder);
2943
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002944 /*
2945 * Must trick the second common lane into life.
2946 * Otherwise we can't even access the PLL.
2947 */
2948 if (ch == DPIO_CH0 && pipe == PIPE_B)
2949 dport->release_cl2_override =
2950 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2951
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002952 chv_phy_powergate_lanes(encoder, true, lane_mask);
2953
Ville Syrjäläa5805162015-05-26 20:42:30 +03002954 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002955
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002956 /* Assert data lane reset */
2957 chv_data_lane_soft_reset(encoder, true);
2958
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002959 /* program left/right clock distribution */
2960 if (pipe != PIPE_B) {
2961 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2962 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2963 if (ch == DPIO_CH0)
2964 val |= CHV_BUFLEFTENA1_FORCE;
2965 if (ch == DPIO_CH1)
2966 val |= CHV_BUFRIGHTENA1_FORCE;
2967 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2968 } else {
2969 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2970 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2971 if (ch == DPIO_CH0)
2972 val |= CHV_BUFLEFTENA2_FORCE;
2973 if (ch == DPIO_CH1)
2974 val |= CHV_BUFRIGHTENA2_FORCE;
2975 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2976 }
2977
Ville Syrjälä9197c882014-04-09 13:29:05 +03002978 /* program clock channel usage */
2979 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2980 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2981 if (pipe != PIPE_B)
2982 val &= ~CHV_PCS_USEDCLKCHANNEL;
2983 else
2984 val |= CHV_PCS_USEDCLKCHANNEL;
2985 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2986
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002987 if (intel_crtc->config->lane_count > 2) {
2988 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2989 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2990 if (pipe != PIPE_B)
2991 val &= ~CHV_PCS_USEDCLKCHANNEL;
2992 else
2993 val |= CHV_PCS_USEDCLKCHANNEL;
2994 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2995 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03002996
2997 /*
2998 * This a a bit weird since generally CL
2999 * matches the pipe, but here we need to
3000 * pick the CL based on the port.
3001 */
3002 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3003 if (pipe != PIPE_B)
3004 val &= ~CHV_CMN_USEDCLKCHANNEL;
3005 else
3006 val |= CHV_CMN_USEDCLKCHANNEL;
3007 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3008
Ville Syrjäläa5805162015-05-26 20:42:30 +03003009 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003010}
3011
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003012static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3013{
3014 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3015 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3016 u32 val;
3017
3018 mutex_lock(&dev_priv->sb_lock);
3019
3020 /* disable left/right clock distribution */
3021 if (pipe != PIPE_B) {
3022 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3023 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3024 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3025 } else {
3026 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3027 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3028 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3029 }
3030
3031 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003032
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003033 /*
3034 * Leave the power down bit cleared for at least one
 3035 * lane so that chv_phy_powergate_ch() will power
3036 * on something when the channel is otherwise unused.
3037 * When the port is off and the override is removed
3038 * the lanes power down anyway, so otherwise it doesn't
3039 * really matter what the state of power down bits is
3040 * after this.
3041 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003042 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003043}
3044
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003045/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003046 * Native read with retry for link status and receiver capability reads for
3047 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003048 *
3049 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3050 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003051 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003052static ssize_t
3053intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3054 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003055{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003056 ssize_t ret;
3057 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003058
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003059 /*
 3060 * Sometimes we just get the same incorrect byte repeated
 3061 * over the entire buffer. Doing just one throw-away read
3062 * initially seems to "solve" it.
3063 */
3064 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3065
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003066 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003067 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3068 if (ret == size)
3069 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003070 msleep(1);
3071 }
3072
Jani Nikula9d1a1032014-03-14 16:51:15 +02003073 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003074}
3075
3076/*
3077 * Fetch AUX CH registers 0x202 - 0x207 which contain
3078 * link status information
3079 */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003080bool
Keith Packard93f62da2011-11-01 19:45:03 -07003081intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003082{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003083 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3084 DP_LANE0_1_STATUS,
3085 link_status,
3086 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003087}
3088
Paulo Zanoni11002442014-06-13 18:45:41 -03003089/* These are source-specific values. */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003090uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003091intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003092{
Paulo Zanoni30add222012-10-26 19:05:45 -02003093 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303094 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003095 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003096
Vandana Kannan93147262014-11-18 15:45:29 +05303097 if (IS_BROXTON(dev))
3098 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3099 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303100 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303101 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003102 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303103 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303104 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003105 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303106 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003107 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303108 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003109 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303110 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003111}
3112
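/*
 * Maximum pre-emphasis level the source can drive for the given voltage
 * swing; like intel_dp_voltage_max() this is a source limitation, not a
 * sink capability, and varies per platform and port.
 */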
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003113uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003114intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3115{
Paulo Zanoni30add222012-10-26 19:05:45 -02003116 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003117 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003118
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003119 if (INTEL_INFO(dev)->gen >= 9) {
3120 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3123 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3126 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003129 default:
3130 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3131 }
3132 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003133 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3139 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003141 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303142 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003143 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003144 } else if (IS_VALLEYVIEW(dev)) {
3145 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3147 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3149 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3151 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003153 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303154 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003155 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003156 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003157 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3162 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003163 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303164 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003165 }
3166 } else {
3167 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3169 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3171 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3173 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003175 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303176 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003177 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003178 }
3179}
3180
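/*
 * Program the VLV DPIO PHY for the requested voltage swing / pre-emphasis
 * combination. The demph/preemph/uniqtranscale values below are opaque,
 * platform-specific tuning constants.
 */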
Daniel Vetter5829975c2015-04-16 11:36:52 +02003181static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003182{
3183 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3184 struct drm_i915_private *dev_priv = dev->dev_private;
3185 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003186 struct intel_crtc *intel_crtc =
3187 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003188 unsigned long demph_reg_value, preemph_reg_value,
3189 uniqtranscale_reg_value;
3190 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003191 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003192 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003193
3194 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303195 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003196 preemph_reg_value = 0x0004000;
3197 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003199 demph_reg_value = 0x2B405555;
3200 uniqtranscale_reg_value = 0x552AB83A;
3201 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003203 demph_reg_value = 0x2B404040;
3204 uniqtranscale_reg_value = 0x5548B83A;
3205 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003207 demph_reg_value = 0x2B245555;
3208 uniqtranscale_reg_value = 0x5560B83A;
3209 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303210 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003211 demph_reg_value = 0x2B405555;
3212 uniqtranscale_reg_value = 0x5598DA3A;
3213 break;
3214 default:
3215 return 0;
3216 }
3217 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303218 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003219 preemph_reg_value = 0x0002000;
3220 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003222 demph_reg_value = 0x2B404040;
3223 uniqtranscale_reg_value = 0x5552B83A;
3224 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003226 demph_reg_value = 0x2B404848;
3227 uniqtranscale_reg_value = 0x5580B83A;
3228 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003230 demph_reg_value = 0x2B404040;
3231 uniqtranscale_reg_value = 0x55ADDA3A;
3232 break;
3233 default:
3234 return 0;
3235 }
3236 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303237 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003238 preemph_reg_value = 0x0000000;
3239 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003241 demph_reg_value = 0x2B305555;
3242 uniqtranscale_reg_value = 0x5570B83A;
3243 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003245 demph_reg_value = 0x2B2B4040;
3246 uniqtranscale_reg_value = 0x55ADDA3A;
3247 break;
3248 default:
3249 return 0;
3250 }
3251 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303252 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003253 preemph_reg_value = 0x0006000;
3254 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003256 demph_reg_value = 0x1B405555;
3257 uniqtranscale_reg_value = 0x55ADDA3A;
3258 break;
3259 default:
3260 return 0;
3261 }
3262 break;
3263 default:
3264 return 0;
3265 }
3266
Ville Syrjäläa5805162015-05-26 20:42:30 +03003267 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3269 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3270 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003271 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003272 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3273 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3274 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3275 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003276 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003277
3278 return 0;
3279}
3280
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003281static bool chv_need_uniq_trans_scale(uint8_t train_set)
3282{
3283 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3284 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3285}
3286
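/*
 * CHV counterpart of vlv_signal_levels(): pick de-emphasis/margin values
 * for the requested swing/pre-emphasis level and program them per lane
 * through DPIO.
 */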
Daniel Vetter5829975c2015-04-16 11:36:52 +02003287static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003288{
3289 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3290 struct drm_i915_private *dev_priv = dev->dev_private;
3291 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3292 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003293 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003294 uint8_t train_set = intel_dp->train_set[0];
3295 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003296 enum pipe pipe = intel_crtc->pipe;
3297 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003298
3299 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303300 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003301 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003303 deemph_reg_value = 128;
3304 margin_reg_value = 52;
3305 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003307 deemph_reg_value = 128;
3308 margin_reg_value = 77;
3309 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003311 deemph_reg_value = 128;
3312 margin_reg_value = 102;
3313 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003315 deemph_reg_value = 128;
3316 margin_reg_value = 154;
 3317 /* FIXME: extra settings to program for the 1200 mV swing level */
3318 break;
3319 default:
3320 return 0;
3321 }
3322 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303323 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003324 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003326 deemph_reg_value = 85;
3327 margin_reg_value = 78;
3328 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003330 deemph_reg_value = 85;
3331 margin_reg_value = 116;
3332 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003334 deemph_reg_value = 85;
3335 margin_reg_value = 154;
3336 break;
3337 default:
3338 return 0;
3339 }
3340 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303341 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003342 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003344 deemph_reg_value = 64;
3345 margin_reg_value = 104;
3346 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003348 deemph_reg_value = 64;
3349 margin_reg_value = 154;
3350 break;
3351 default:
3352 return 0;
3353 }
3354 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303355 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003356 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003358 deemph_reg_value = 43;
3359 margin_reg_value = 154;
3360 break;
3361 default:
3362 return 0;
3363 }
3364 break;
3365 default:
3366 return 0;
3367 }
3368
Ville Syrjäläa5805162015-05-26 20:42:30 +03003369 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003370
3371 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003372 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3373 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003374 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3375 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003376 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3377
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003378 if (intel_crtc->config->lane_count > 2) {
3379 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3380 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3381 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3382 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3383 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3384 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003385
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003386 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3387 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3388 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3389 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3390
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003391 if (intel_crtc->config->lane_count > 2) {
3392 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3393 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3394 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3395 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3396 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003397
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003398 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003399 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003400 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3401 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3402 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3403 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3404 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003405
3406 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003407 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003408 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003409
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003410 val &= ~DPIO_SWING_MARGIN000_MASK;
3411 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003412
3413 /*
3414 * Supposedly this value shouldn't matter when unique transition
3415 * scale is disabled, but in fact it does matter. Let's just
3416 * always program the same value and hope it's OK.
3417 */
3418 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3419 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3420
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003421 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3422 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003423
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003424 /*
3425 * The document said it needs to set bit 27 for ch0 and bit 26
3426 * for ch1. Might be a typo in the doc.
3427 * For now, for this unique transition scale selection, set bit
3428 * 27 for ch0 and ch1.
3429 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003430 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003431 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003432 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003433 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003434 else
3435 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3436 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003437 }
3438
3439 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003440 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3441 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3442 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3443
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003444 if (intel_crtc->config->lane_count > 2) {
3445 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3446 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3447 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3448 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003449
Ville Syrjäläa5805162015-05-26 20:42:30 +03003450 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003451
3452 return 0;
3453}
3454
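/*
 * Gen4 encodes the DPCD training values directly in the DP port register.
 * For example, a train_set of DP_TRAIN_VOLTAGE_SWING_LEVEL_2 |
 * DP_TRAIN_PRE_EMPH_LEVEL_1 is translated below into
 * DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5.
 */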
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003455static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003456gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003457{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003458 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003459
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003460 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003462 default:
3463 signal_levels |= DP_VOLTAGE_0_4;
3464 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003466 signal_levels |= DP_VOLTAGE_0_6;
3467 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003469 signal_levels |= DP_VOLTAGE_0_8;
3470 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003472 signal_levels |= DP_VOLTAGE_1_2;
3473 break;
3474 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003475 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303476 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003477 default:
3478 signal_levels |= DP_PRE_EMPHASIS_0;
3479 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303480 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003481 signal_levels |= DP_PRE_EMPHASIS_3_5;
3482 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303483 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003484 signal_levels |= DP_PRE_EMPHASIS_6;
3485 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303486 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003487 signal_levels |= DP_PRE_EMPHASIS_9_5;
3488 break;
3489 }
3490 return signal_levels;
3491}
3492
Zhenyu Wange3421a12010-04-08 09:43:27 +08003493/* Gen6's DP voltage swing and pre-emphasis control */
3494static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003495gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003496{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003497 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3498 DP_TRAIN_PRE_EMPHASIS_MASK);
3499 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303500 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003502 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303503 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003504 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303505 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003507 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303508 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3509 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003510 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3512 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003513 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003514 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003515		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3516			      " 0x%x\n", signal_levels);
3517 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003518 }
3519}
3520
Keith Packard1a2eb462011-11-16 16:26:07 -08003521/* Gen7's DP voltage swing and pre-emphasis control */
3522static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003523gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003524{
3525 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3526 DP_TRAIN_PRE_EMPHASIS_MASK);
3527 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303528 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003529 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303530 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003531 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303532 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003533 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3534
Sonika Jindalbd600182014-08-08 16:23:41 +05303535 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003536 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303537 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003538 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3539
Sonika Jindalbd600182014-08-08 16:23:41 +05303540 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003541 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003543 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3544
3545 default:
3546		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3547			      " 0x%x\n", signal_levels);
3548 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3549 }
3550}
3551
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003552void
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003553intel_dp_set_signal_levels(struct intel_dp *intel_dp)
Paulo Zanonif0a34242012-12-06 16:51:50 -02003554{
3555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003556 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003557 struct drm_device *dev = intel_dig_port->base.base.dev;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003558 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehallf8896f52015-06-25 11:11:03 +03003559 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003560 uint8_t train_set = intel_dp->train_set[0];
3561
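	/*
	 * Translate the DPCD vswing/pre-emphasis request into the platform's
	 * register encoding. "mask" covers the bits in the port register that
	 * carry the signal levels, so only those bits get rewritten below.
	 */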
David Weinehallf8896f52015-06-25 11:11:03 +03003562 if (HAS_DDI(dev)) {
3563 signal_levels = ddi_signal_levels(intel_dp);
3564
3565 if (IS_BROXTON(dev))
3566 signal_levels = 0;
3567 else
3568 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003569 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003570 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003571 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003572 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003573 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003574 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003575 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003576 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003577 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003578 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3579 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003580 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003581 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3582 }
3583
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303584 if (mask)
3585 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3586
3587 DRM_DEBUG_KMS("Using vswing level %d\n",
3588 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3589 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3590 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3591 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003592
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003593 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003594
3595 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3596 POSTING_READ(intel_dp->output_reg);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003597}
3598
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003599void
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003600intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3601 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003602{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003603 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003604 struct drm_i915_private *dev_priv =
3605 to_i915(intel_dig_port->base.base.dev);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003606
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003607 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003608
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003609 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003610 POSTING_READ(intel_dp->output_reg);
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003611}
3612
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003613void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
Imre Deak3ab9c632013-05-03 12:57:41 +03003614{
3615 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3616 struct drm_device *dev = intel_dig_port->base.base.dev;
3617 struct drm_i915_private *dev_priv = dev->dev_private;
3618 enum port port = intel_dig_port->port;
3619 uint32_t val;
3620
3621 if (!HAS_DDI(dev))
3622 return;
3623
3624 val = I915_READ(DP_TP_CTL(port));
3625 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3626 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3627 I915_WRITE(DP_TP_CTL(port), val);
3628
3629 /*
3630 * On PORT_A we can have only eDP in SST mode. There the only reason
3631 * we need to set idle transmission mode is to work around a HW issue
3632 * where we enable the pipe while not in idle link-training mode.
3633	 * In this case there is a requirement to wait for a minimum number of
3634 * idle patterns to be sent.
3635 */
3636 if (port == PORT_A)
3637 return;
3638
3639 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3640 1))
3641 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3642}
3643
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003644static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003645intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003646{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003647 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003648 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003649 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003650 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003651 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003652 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003653
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003654 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003655 return;
3656
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003657 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003658 return;
3659
Zhao Yakui28c97732009-10-09 11:39:41 +08003660 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003661
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003662 if ((IS_GEN7(dev) && port == PORT_A) ||
3663 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003664 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003665 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003666 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003667 if (IS_CHERRYVIEW(dev))
3668 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3669 else
3670 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003671 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003672 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003673 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003674 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003675
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003676 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3677 I915_WRITE(intel_dp->output_reg, DP);
3678 POSTING_READ(intel_dp->output_reg);
3679
3680 /*
3681 * HW workaround for IBX, we need to move the port
3682 * to transcoder A after disabling it to allow the
3683 * matching HDMI port to be enabled on transcoder A.
3684 */
3685 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003686 /*
3687 * We get CPU/PCH FIFO underruns on the other pipe when
3688 * doing the workaround. Sweep them under the rug.
3689 */
3690 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3691 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3692
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003693 /* always enable with pattern 1 (as per spec) */
3694 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3695 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3696 I915_WRITE(intel_dp->output_reg, DP);
3697 POSTING_READ(intel_dp->output_reg);
3698
3699 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003700 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003701 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003702
3703 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3704 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3705 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
Eric Anholt5bddd172010-11-18 09:32:59 +08003706 }
3707
Keith Packardf01eca22011-09-28 16:48:10 -07003708 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003709}
3710
Keith Packard26d61aa2011-07-25 20:01:09 -07003711static bool
3712intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003713{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303717 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003718
Jani Nikula9d1a1032014-03-14 16:51:15 +02003719 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003721 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003722
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003723 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003724
Adam Jacksonedb39242012-09-18 10:58:49 -04003725 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3726 return false; /* DPCD not present */
3727
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003728 /* Check if the panel supports PSR */
3729 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003730 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003731 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3732 intel_dp->psr_dpcd,
3733 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003734 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3735 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003736 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003737 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303738
3739 if (INTEL_INFO(dev)->gen >= 9 &&
3740 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3741 uint8_t frame_sync_cap;
3742
3743 dev_priv->psr.sink_support = true;
3744 intel_dp_dpcd_read_wake(&intel_dp->aux,
3745 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3746 &frame_sync_cap, 1);
3747 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3748 /* PSR2 needs frame sync as well */
3749 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3750			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3751 dev_priv->psr.psr2_support ? "supported" : "not supported");
3752 }
Jani Nikula50003932013-09-20 16:42:17 +03003753 }
3754
Jani Nikulabc5133d2015-09-03 11:16:07 +03003755 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03003756 yesno(intel_dp_source_supports_hbr2(intel_dp)),
Jani Nikula742f4912015-09-03 11:16:09 +03003757 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07003758
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303759 /* Intermediate frequency support */
3760 if (is_edp(intel_dp) &&
3761 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3762 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3763	    (rev >= 0x03)) { /* eDP v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003764 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003765 int i;
3766
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303767 intel_dp_dpcd_read_wake(&intel_dp->aux,
3768 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003769 sink_rates,
3770 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003771
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003772 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3773 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003774
3775 if (val == 0)
3776 break;
3777
Sonika Jindalaf77b972015-05-07 13:59:28 +05303778			/* Value read is in units of 200 kHz while drm clock is saved in deca-kHz */
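			/* e.g. a raw value of 8100 (8100 * 200 kHz = 1.62 GHz) is stored as 162000 */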
3779 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003780 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003781 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303782 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003783
3784 intel_dp_print_rates(intel_dp);
3785
Adam Jacksonedb39242012-09-18 10:58:49 -04003786 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3787 DP_DWN_STRM_PORT_PRESENT))
3788 return true; /* native DP sink */
3789
3790 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3791 return true; /* no per-port downstream info */
3792
Jani Nikula9d1a1032014-03-14 16:51:15 +02003793 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3794 intel_dp->downstream_ports,
3795 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003796 return false; /* downstream port status fetch failed */
3797
3798 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003799}
3800
Adam Jackson0d198322012-05-14 16:05:47 -04003801static void
3802intel_dp_probe_oui(struct intel_dp *intel_dp)
3803{
3804 u8 buf[3];
3805
3806 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3807 return;
3808
Jani Nikula9d1a1032014-03-14 16:51:15 +02003809 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003810 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3811 buf[0], buf[1], buf[2]);
3812
Jani Nikula9d1a1032014-03-14 16:51:15 +02003813 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003814 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3815 buf[0], buf[1], buf[2]);
3816}
3817
Dave Airlie0e32b392014-05-02 14:02:48 +10003818static bool
3819intel_dp_probe_mst(struct intel_dp *intel_dp)
3820{
3821 u8 buf[1];
3822
3823 if (!intel_dp->can_mst)
3824 return false;
3825
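	/* MST was introduced with DPCD 1.2; older sinks cannot be MST capable. */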
3826 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3827 return false;
3828
Dave Airlie0e32b392014-05-02 14:02:48 +10003829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3830 if (buf[0] & DP_MST_CAP) {
3831 DRM_DEBUG_KMS("Sink is MST capable\n");
3832 intel_dp->is_mst = true;
3833 } else {
3834 DRM_DEBUG_KMS("Sink is not MST capable\n");
3835 intel_dp->is_mst = false;
3836 }
3837 }
Dave Airlie0e32b392014-05-02 14:02:48 +10003838
3839 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3840 return intel_dp->is_mst;
3841}
3842
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003843static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003844{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003845 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3846 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003847 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003848 int ret = 0;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003849
3850 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003851 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003852 ret = -EIO;
3853 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003854 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003855
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003856 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003857 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003858 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003859 ret = -EIO;
3860 goto out;
3861 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003862
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003863 intel_dp->sink_crc.started = false;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003864 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003865 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003866 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003867}
3868
3869static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3870{
3871 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3872 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3873 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003874 int ret;
3875
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003876 if (intel_dp->sink_crc.started) {
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003877 ret = intel_dp_sink_crc_stop(intel_dp);
3878 if (ret)
3879 return ret;
3880 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003881
3882 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3883 return -EIO;
3884
3885 if (!(buf & DP_TEST_CRC_SUPPORTED))
3886 return -ENOTTY;
3887
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003888 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3889
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003890 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3891 return -EIO;
3892
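	/*
	 * Sink CRC readings tend to be unreliable with IPS enabled, so keep
	 * it off for the duration of the measurement; intel_dp_sink_crc_stop()
	 * turns it back on.
	 */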
3893 hsw_disable_ips(intel_crtc);
3894
3895 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3896 buf | DP_TEST_SINK_START) < 0) {
3897 hsw_enable_ips(intel_crtc);
3898 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003899 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003900
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003901 intel_dp->sink_crc.started = true;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003902 return 0;
3903}
3904
3905int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3906{
3907 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3908 struct drm_device *dev = dig_port->base.base.dev;
3909 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3910 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003911 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003912 int attempts = 6;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003913 bool old_equal_new;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003914
3915 ret = intel_dp_sink_crc_start(intel_dp);
3916 if (ret)
3917 return ret;
3918
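	/*
	 * Poll once per vblank, for up to 6 attempts, until the sink reports
	 * a non-zero test count together with a CRC that differs from the
	 * previous reading.
	 */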
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003919 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003920 intel_wait_for_vblank(dev, intel_crtc->pipe);
3921
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07003922 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003923 DP_TEST_SINK_MISC, &buf) < 0) {
3924 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003925 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003926 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003927 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003928
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003929 /*
3930		 * The count might be reset during the loop. In that case the
3931		 * last known count needs to be reset as well.
3932 */
3933 if (count == 0)
3934 intel_dp->sink_crc.last_count = 0;
3935
3936 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3937 ret = -EIO;
3938 goto stop;
3939 }
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003940
3941 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3942 !memcmp(intel_dp->sink_crc.last_crc, crc,
3943 6 * sizeof(u8)));
3944
3945 } while (--attempts && (count == 0 || old_equal_new));
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003946
3947 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3948 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003949
3950 if (attempts == 0) {
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003951 if (old_equal_new) {
3952 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3953 } else {
3954 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3955 ret = -ETIMEDOUT;
3956 goto stop;
3957 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003958 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003959
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003960stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003961 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003962 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003963}
3964
Jesse Barnesa60f0e32011-10-20 15:09:17 -07003965static bool
3966intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3967{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003968 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3969 DP_DEVICE_SERVICE_IRQ_VECTOR,
3970 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07003971}
3972
Dave Airlie0e32b392014-05-02 14:02:48 +10003973static bool
3974intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3975{
3976 int ret;
3977
3978 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3979 DP_SINK_COUNT_ESI,
3980 sink_irq_vector, 14);
3981 if (ret != 14)
3982 return false;
3983
3984 return true;
3985}
3986
Todd Previtec5d5ab72015-04-15 08:38:38 -07003987static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07003988{
Todd Previtec5d5ab72015-04-15 08:38:38 -07003989 uint8_t test_result = DP_TEST_ACK;
3990 return test_result;
3991}
3992
3993static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3994{
3995 uint8_t test_result = DP_TEST_NAK;
3996 return test_result;
3997}
3998
3999static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4000{
4001 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004002 struct intel_connector *intel_connector = intel_dp->attached_connector;
4003 struct drm_connector *connector = &intel_connector->base;
4004
4005 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004006 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004007 intel_dp->aux.i2c_defer_count > 6) {
4008 /* Check EDID read for NACKs, DEFERs and corruption
4009 * (DP CTS 1.2 Core r1.1)
4010 * 4.2.2.4 : Failed EDID read, I2C_NAK
4011 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4012 * 4.2.2.6 : EDID corruption detected
4013 * Use failsafe mode for all cases
4014 */
4015 if (intel_dp->aux.i2c_nack_count > 0 ||
4016 intel_dp->aux.i2c_defer_count > 0)
4017 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4018 intel_dp->aux.i2c_nack_count,
4019 intel_dp->aux.i2c_defer_count);
4020 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4021 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304022 struct edid *block = intel_connector->detect_edid;
4023
4024 /* We have to write the checksum
4025 * of the last block read
4026 */
4027 block += intel_connector->detect_edid->extensions;
4028
Todd Previte559be302015-05-04 07:48:20 -07004029 if (!drm_dp_dpcd_write(&intel_dp->aux,
4030 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304031 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004032 1))
Todd Previte559be302015-05-04 07:48:20 -07004033 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4034
4035 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4036 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4037 }
4038
4039 /* Set test active flag here so userspace doesn't interrupt things */
4040 intel_dp->compliance_test_active = 1;
4041
Todd Previtec5d5ab72015-04-15 08:38:38 -07004042 return test_result;
4043}
4044
4045static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4046{
4047 uint8_t test_result = DP_TEST_NAK;
4048 return test_result;
4049}
4050
4051static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4052{
4053 uint8_t response = DP_TEST_NAK;
4054 uint8_t rxdata = 0;
4055 int status = 0;
4056
Todd Previte559be302015-05-04 07:48:20 -07004057 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004058 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004059 intel_dp->compliance_test_data = 0;
4060
Todd Previtec5d5ab72015-04-15 08:38:38 -07004061 intel_dp->aux.i2c_nack_count = 0;
4062 intel_dp->aux.i2c_defer_count = 0;
4063
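	/*
	 * The sink latches the requested automated test in DP_TEST_REQUEST;
	 * the ACK/NAK result is reported back through DP_TEST_RESPONSE below.
	 */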
4064 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4065 if (status <= 0) {
4066 DRM_DEBUG_KMS("Could not read test request from sink\n");
4067 goto update_status;
4068 }
4069
4070 switch (rxdata) {
4071 case DP_TEST_LINK_TRAINING:
4072 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4073 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4074 response = intel_dp_autotest_link_training(intel_dp);
4075 break;
4076 case DP_TEST_LINK_VIDEO_PATTERN:
4077 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4078 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4079 response = intel_dp_autotest_video_pattern(intel_dp);
4080 break;
4081 case DP_TEST_LINK_EDID_READ:
4082 DRM_DEBUG_KMS("EDID test requested\n");
4083 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4084 response = intel_dp_autotest_edid(intel_dp);
4085 break;
4086 case DP_TEST_LINK_PHY_TEST_PATTERN:
4087 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4088 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4089 response = intel_dp_autotest_phy_pattern(intel_dp);
4090 break;
4091 default:
4092 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4093 break;
4094 }
4095
4096update_status:
4097 status = drm_dp_dpcd_write(&intel_dp->aux,
4098 DP_TEST_RESPONSE,
4099 &response, 1);
4100 if (status <= 0)
4101 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004102}
4103
Dave Airlie0e32b392014-05-02 14:02:48 +10004104static int
4105intel_dp_check_mst_status(struct intel_dp *intel_dp)
4106{
4107 bool bret;
4108
4109 if (intel_dp->is_mst) {
4110 u8 esi[16] = { 0 };
4111 int ret = 0;
4112 int retry;
4113 bool handled;
4114 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4115go_again:
4116 if (bret == true) {
4117
4118 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004119 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004120 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004121 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4122 intel_dp_start_link_train(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004123 intel_dp_stop_link_train(intel_dp);
4124 }
4125
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004126 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004127 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4128
4129 if (handled) {
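			/*
			 * Write the serviced ESI bits back to the sink to
			 * clear them, retrying the 3-byte write a few times
			 * in case the AUX transfer fails.
			 */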
4130 for (retry = 0; retry < 3; retry++) {
4131 int wret;
4132 wret = drm_dp_dpcd_write(&intel_dp->aux,
4133 DP_SINK_COUNT_ESI+1,
4134 &esi[1], 3);
4135 if (wret == 3) {
4136 break;
4137 }
4138 }
4139
4140 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4141 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004142 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004143 goto go_again;
4144 }
4145 } else
4146 ret = 0;
4147
4148 return ret;
4149 } else {
4150 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4151 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4152 intel_dp->is_mst = false;
4153 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4154 /* send a hotplug event */
4155 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4156 }
4157 }
4158 return -EINVAL;
4159}
4160
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004161/*
4162 * According to DP spec
4163 * 5.1.2:
4164 * 1. Read DPCD
4165 * 2. Configure link according to Receiver Capabilities
4166 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4167 * 4. Check link status on receipt of hot-plug interrupt
4168 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004169static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004170intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004171{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004172 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004173 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004174 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004175 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004176
Dave Airlie5b215bc2014-08-05 10:40:20 +10004177 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4178
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004179 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004180 return;
4181
Imre Deak1a125d82014-08-18 14:42:46 +03004182 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4183 return;
4184
Keith Packard92fd8fd2011-07-25 19:50:10 -07004185 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004186 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004187 return;
4188 }
4189
Keith Packard92fd8fd2011-07-25 19:50:10 -07004190 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004191 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004192 return;
4193 }
4194
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004195 /* Try to read the source of the interrupt */
4196 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4197 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4198 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004199 drm_dp_dpcd_writeb(&intel_dp->aux,
4200 DP_DEVICE_SERVICE_IRQ_VECTOR,
4201 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004202
4203 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004204 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004205 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4206 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4207 }
4208
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004209 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004210 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004211 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004212 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004213 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004214 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004215}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004216
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004217/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004218static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004219intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004220{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004221 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004222 uint8_t type;
4223
4224 if (!intel_dp_get_dpcd(intel_dp))
4225 return connector_status_disconnected;
4226
4227 /* if there's no downstream port, we're done */
4228 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004229 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004230
4231 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004232 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4233 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004234 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004235
4236 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4237 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004238 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004239
Adam Jackson23235172012-09-20 16:42:45 -04004240 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4241 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004242 }
4243
4244 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004245 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004246 return connector_status_connected;
4247
4248 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004249 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4250 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4251 if (type == DP_DS_PORT_TYPE_VGA ||
4252 type == DP_DS_PORT_TYPE_NON_EDID)
4253 return connector_status_unknown;
4254 } else {
4255 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4256 DP_DWN_STRM_PORT_TYPE_MASK;
4257 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4258 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4259 return connector_status_unknown;
4260 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004261
4262 /* Anything else is out of spec, warn and ignore */
4263 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004264 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004265}
4266
4267static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004268edp_detect(struct intel_dp *intel_dp)
4269{
4270 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4271 enum drm_connector_status status;
4272
4273 status = intel_panel_detect(dev);
4274 if (status == connector_status_unknown)
4275 status = connector_status_connected;
4276
4277 return status;
4278}
4279
Jani Nikulab93433c2015-08-20 10:47:36 +03004280static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4281 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004282{
Jani Nikulab93433c2015-08-20 10:47:36 +03004283 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004284
Jani Nikula0df53b72015-08-20 10:47:40 +03004285 switch (port->port) {
4286 case PORT_A:
4287 return true;
4288 case PORT_B:
4289 bit = SDE_PORTB_HOTPLUG;
4290 break;
4291 case PORT_C:
4292 bit = SDE_PORTC_HOTPLUG;
4293 break;
4294 case PORT_D:
4295 bit = SDE_PORTD_HOTPLUG;
4296 break;
4297 default:
4298 MISSING_CASE(port->port);
4299 return false;
4300 }
4301
4302 return I915_READ(SDEISR) & bit;
4303}
4304
4305static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4306 struct intel_digital_port *port)
4307{
4308 u32 bit;
4309
4310 switch (port->port) {
4311 case PORT_A:
4312 return true;
4313 case PORT_B:
4314 bit = SDE_PORTB_HOTPLUG_CPT;
4315 break;
4316 case PORT_C:
4317 bit = SDE_PORTC_HOTPLUG_CPT;
4318 break;
4319 case PORT_D:
4320 bit = SDE_PORTD_HOTPLUG_CPT;
4321 break;
Jani Nikulaa78695d2015-09-18 15:54:50 +03004322 case PORT_E:
4323 bit = SDE_PORTE_HOTPLUG_SPT;
4324 break;
Jani Nikula0df53b72015-08-20 10:47:40 +03004325 default:
4326 MISSING_CASE(port->port);
4327 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004328 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004329
Jani Nikulab93433c2015-08-20 10:47:36 +03004330 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004331}
4332
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004333static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004334 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004335{
Jani Nikula9642c812015-08-20 10:47:41 +03004336 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004337
Jani Nikula9642c812015-08-20 10:47:41 +03004338 switch (port->port) {
4339 case PORT_B:
4340 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4341 break;
4342 case PORT_C:
4343 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4344 break;
4345 case PORT_D:
4346 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4347 break;
4348 default:
4349 MISSING_CASE(port->port);
4350 return false;
4351 }
4352
4353 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4354}
4355
4356static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4357 struct intel_digital_port *port)
4358{
4359 u32 bit;
4360
4361 switch (port->port) {
4362 case PORT_B:
4363 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4364 break;
4365 case PORT_C:
4366 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4367 break;
4368 case PORT_D:
4369 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4370 break;
4371 default:
4372 MISSING_CASE(port->port);
4373 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004374 }
4375
Jani Nikula1d245982015-08-20 10:47:37 +03004376 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004377}
4378
Jani Nikulae464bfd2015-08-20 10:47:42 +03004379static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304380 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004381{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304382 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4383 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004384 u32 bit;
4385
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304386 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4387 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004388 case PORT_A:
4389 bit = BXT_DE_PORT_HP_DDIA;
4390 break;
4391 case PORT_B:
4392 bit = BXT_DE_PORT_HP_DDIB;
4393 break;
4394 case PORT_C:
4395 bit = BXT_DE_PORT_HP_DDIC;
4396 break;
4397 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304398 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004399 return false;
4400 }
4401
4402 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4403}
4404
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004405/*
4406 * intel_digital_port_connected - is the specified port connected?
4407 * @dev_priv: i915 private structure
4408 * @port: the port to test
4409 *
4410 * Return %true if @port is connected, %false otherwise.
4411 */
Sonika Jindal237ed862015-09-15 09:44:20 +05304412bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004413 struct intel_digital_port *port)
4414{
Jani Nikula0df53b72015-08-20 10:47:40 +03004415 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004416 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004417 if (HAS_PCH_SPLIT(dev_priv))
4418 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004419 else if (IS_BROXTON(dev_priv))
4420 return bxt_digital_port_connected(dev_priv, port);
Jani Nikula9642c812015-08-20 10:47:41 +03004421 else if (IS_VALLEYVIEW(dev_priv))
4422 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004423 else
4424 return g4x_digital_port_connected(dev_priv, port);
4425}
4426
Dave Airlie2a592be2014-09-01 16:58:12 +10004427static enum drm_connector_status
Jani Nikulab93433c2015-08-20 10:47:36 +03004428ironlake_dp_detect(struct intel_dp *intel_dp)
4429{
4430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4431 struct drm_i915_private *dev_priv = dev->dev_private;
4432 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4433
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004434 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
Jani Nikulab93433c2015-08-20 10:47:36 +03004435 return connector_status_disconnected;
4436
4437 return intel_dp_detect_dpcd(intel_dp);
4438}
4439
4440static enum drm_connector_status
Dave Airlie2a592be2014-09-01 16:58:12 +10004441g4x_dp_detect(struct intel_dp *intel_dp)
4442{
4443 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4444 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Dave Airlie2a592be2014-09-01 16:58:12 +10004445
4446 /* Can't disconnect eDP, but you can close the lid... */
4447 if (is_edp(intel_dp)) {
4448 enum drm_connector_status status;
4449
4450 status = intel_panel_detect(dev);
4451 if (status == connector_status_unknown)
4452 status = connector_status_connected;
4453 return status;
4454 }
4455
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004456 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004457 return connector_status_disconnected;
4458
Keith Packard26d61aa2011-07-25 20:01:09 -07004459 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004460}
4461
Keith Packard8c241fe2011-09-28 16:38:44 -07004462static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004463intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004464{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004465 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004466
Jani Nikula9cd300e2012-10-19 14:51:52 +03004467 /* use cached edid if we have one */
4468 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004469 /* invalid edid */
4470 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004471 return NULL;
4472
Jani Nikula55e9ede2013-10-01 10:38:54 +03004473 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004474 } else
4475 return drm_get_edid(&intel_connector->base,
4476 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004477}
4478
Chris Wilsonbeb60602014-09-02 20:04:00 +01004479static void
4480intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004481{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004482 struct intel_connector *intel_connector = intel_dp->attached_connector;
4483 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004484
Chris Wilsonbeb60602014-09-02 20:04:00 +01004485 edid = intel_dp_get_edid(intel_dp);
4486 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004487
Chris Wilsonbeb60602014-09-02 20:04:00 +01004488 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4489 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4490 else
4491 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4492}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004493
Chris Wilsonbeb60602014-09-02 20:04:00 +01004494static void
4495intel_dp_unset_edid(struct intel_dp *intel_dp)
4496{
4497 struct intel_connector *intel_connector = intel_dp->attached_connector;
4498
4499 kfree(intel_connector->detect_edid);
4500 intel_connector->detect_edid = NULL;
4501
4502 intel_dp->has_audio = false;
4503}
4504
4505static enum intel_display_power_domain
4506intel_dp_power_get(struct intel_dp *dp)
4507{
4508 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4509 enum intel_display_power_domain power_domain;
4510
4511 power_domain = intel_display_port_power_domain(encoder);
4512 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4513
4514 return power_domain;
4515}
4516
4517static void
4518intel_dp_power_put(struct intel_dp *dp,
4519 enum intel_display_power_domain power_domain)
4520{
4521 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4522 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004523}
4524
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004525static enum drm_connector_status
4526intel_dp_detect(struct drm_connector *connector, bool force)
4527{
4528 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004529 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4530 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004531 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004532 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004533 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004534 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004535 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004536
Chris Wilson164c8592013-07-20 20:27:08 +01004537 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004538 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004539 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004540
Dave Airlie0e32b392014-05-02 14:02:48 +10004541 if (intel_dp->is_mst) {
4542 /* MST devices are disconnected from a monitor POV */
4543 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4544 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004545 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004546 }
4547
Chris Wilsonbeb60602014-09-02 20:04:00 +01004548 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004549
Chris Wilsond410b562014-09-02 20:03:59 +01004550 /* Can't disconnect eDP, but you can close the lid... */
4551 if (is_edp(intel_dp))
4552 status = edp_detect(intel_dp);
4553 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004554 status = ironlake_dp_detect(intel_dp);
4555 else
4556 status = g4x_dp_detect(intel_dp);
4557 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004558 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004559
Adam Jackson0d198322012-05-14 16:05:47 -04004560 intel_dp_probe_oui(intel_dp);
4561
Dave Airlie0e32b392014-05-02 14:02:48 +10004562 ret = intel_dp_probe_mst(intel_dp);
4563 if (ret) {
4564		/* if we are in MST mode then this connector
4565		 * won't appear connected or have anything with EDID on it */
4566 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4567 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4568 status = connector_status_disconnected;
4569 goto out;
4570 }
4571
Chris Wilsonbeb60602014-09-02 20:04:00 +01004572 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004573
Paulo Zanonid63885d2012-10-26 19:05:49 -02004574 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4575 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004576 status = connector_status_connected;
4577
Todd Previte09b1eb12015-04-20 15:27:34 -07004578 /* Try to read the source of the interrupt */
4579 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4580 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4581 /* Clear interrupt source */
4582 drm_dp_dpcd_writeb(&intel_dp->aux,
4583 DP_DEVICE_SERVICE_IRQ_VECTOR,
4584 sink_irq_vector);
4585
4586 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4587 intel_dp_handle_test_request(intel_dp);
4588 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4589 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4590 }
4591
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004592out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004593 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004594 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004595}
4596
Chris Wilsonbeb60602014-09-02 20:04:00 +01004597static void
4598intel_dp_force(struct drm_connector *connector)
4599{
4600 struct intel_dp *intel_dp = intel_attached_dp(connector);
4601 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4602 enum intel_display_power_domain power_domain;
4603
4604 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4605 connector->base.id, connector->name);
4606 intel_dp_unset_edid(intel_dp);
4607
4608 if (connector->status != connector_status_connected)
4609 return;
4610
4611 power_domain = intel_dp_power_get(intel_dp);
4612
4613 intel_dp_set_edid(intel_dp);
4614
4615 intel_dp_power_put(intel_dp, power_domain);
4616
4617 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4618 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4619}
4620
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004621static int intel_dp_get_modes(struct drm_connector *connector)
4622{
Jani Nikuladd06f902012-10-19 14:51:50 +03004623 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004624 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004625
Chris Wilsonbeb60602014-09-02 20:04:00 +01004626 edid = intel_connector->detect_edid;
4627 if (edid) {
4628 int ret = intel_connector_update_modes(connector, edid);
4629 if (ret)
4630 return ret;
4631 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004632
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004633 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004634 if (is_edp(intel_attached_dp(connector)) &&
4635 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004636 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004637
4638 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004639 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004640 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004641 drm_mode_probed_add(connector, mode);
4642 return 1;
4643 }
4644 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004645
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004646 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004647}
4648
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004649static bool
4650intel_dp_detect_audio(struct drm_connector *connector)
4651{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004652 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004653 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004654
Chris Wilsonbeb60602014-09-02 20:04:00 +01004655 edid = to_intel_connector(connector)->detect_edid;
4656 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004657 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004658
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004659 return has_audio;
4660}
4661
Chris Wilsonf6849602010-09-19 09:29:33 +01004662static int
4663intel_dp_set_property(struct drm_connector *connector,
4664 struct drm_property *property,
4665 uint64_t val)
4666{
Chris Wilsone953fd72011-02-21 22:23:52 +00004667 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004668 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004669 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4670 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004671 int ret;
4672
Rob Clark662595d2012-10-11 20:36:04 -05004673 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004674 if (ret)
4675 return ret;
4676
Chris Wilson3f43c482011-05-12 22:17:24 +01004677 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004678 int i = val;
4679 bool has_audio;
4680
4681 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004682 return 0;
4683
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004684 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004685
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004686 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004687 has_audio = intel_dp_detect_audio(connector);
4688 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004689 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004690
4691 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004692 return 0;
4693
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004694 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004695 goto done;
4696 }
4697
Chris Wilsone953fd72011-02-21 22:23:52 +00004698 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004699 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004700 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004701
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004702 switch (val) {
4703 case INTEL_BROADCAST_RGB_AUTO:
4704 intel_dp->color_range_auto = true;
4705 break;
4706 case INTEL_BROADCAST_RGB_FULL:
4707 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004708 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004709 break;
4710 case INTEL_BROADCAST_RGB_LIMITED:
4711 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004712 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004713 break;
4714 default:
4715 return -EINVAL;
4716 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004717
4718 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004719 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004720 return 0;
4721
Chris Wilsone953fd72011-02-21 22:23:52 +00004722 goto done;
4723 }
4724
Yuly Novikov53b41832012-10-26 12:04:00 +03004725 if (is_edp(intel_dp) &&
4726 property == connector->dev->mode_config.scaling_mode_property) {
4727 if (val == DRM_MODE_SCALE_NONE) {
4728 DRM_DEBUG_KMS("no scaling not supported\n");
4729 return -EINVAL;
4730 }
4731
4732 if (intel_connector->panel.fitting_mode == val) {
4733 /* the eDP scaling property is not changed */
4734 return 0;
4735 }
4736 intel_connector->panel.fitting_mode = val;
4737
4738 goto done;
4739 }
4740
Chris Wilsonf6849602010-09-19 09:29:33 +01004741 return -EINVAL;
4742
4743done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004744 if (intel_encoder->base.crtc)
4745 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004746
4747 return 0;
4748}
4749
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004750static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004751intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004752{
Jani Nikula1d508702012-10-19 14:51:49 +03004753 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004754
Chris Wilson10e972d2014-09-04 21:43:45 +01004755 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004756
Jani Nikula9cd300e2012-10-19 14:51:52 +03004757 if (!IS_ERR_OR_NULL(intel_connector->edid))
4758 kfree(intel_connector->edid);
4759
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004760 /* Can't call is_edp() since the encoder may have been destroyed
4761 * already. */
4762 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004763 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004764
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004765 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004766 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004767}
4768
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004769void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004770{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004771 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4772 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004773
Dave Airlie4f71d0c2014-06-04 16:02:28 +10004774 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10004775 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004776 if (is_edp(intel_dp)) {
4777 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004778 /*
4779	 * vdd might still be enabled due to the delayed vdd off.
4780 * Make sure vdd is actually turned off here.
4781 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004782 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004783 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004784 pps_unlock(intel_dp);
4785
Clint Taylor01527b32014-07-07 13:01:46 -07004786 if (intel_dp->edp_notifier.notifier_call) {
4787 unregister_reboot_notifier(&intel_dp->edp_notifier);
4788 intel_dp->edp_notifier.notifier_call = NULL;
4789 }
Keith Packardbd943152011-09-18 23:09:52 -07004790 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004791 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004792 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004793}
4794
Imre Deak07f9cd02014-08-18 14:42:45 +03004795static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4796{
4797 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4798
4799 if (!is_edp(intel_dp))
4800 return;
4801
Ville Syrjälä951468f2014-09-04 14:55:31 +03004802 /*
4803	 * vdd might still be enabled due to the delayed vdd off.
4804 * Make sure vdd is actually turned off here.
4805 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004806 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004807 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004808 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004809 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004810}
4811
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004812static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4813{
4814 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4815 struct drm_device *dev = intel_dig_port->base.base.dev;
4816 struct drm_i915_private *dev_priv = dev->dev_private;
4817 enum intel_display_power_domain power_domain;
4818
4819 lockdep_assert_held(&dev_priv->pps_mutex);
4820
4821 if (!edp_have_panel_vdd(intel_dp))
4822 return;
4823
4824 /*
4825 * The VDD bit needs a power domain reference, so if the bit is
4826 * already enabled when we boot or resume, grab this reference and
4827 * schedule a vdd off, so we don't hold on to the reference
4828 * indefinitely.
4829 */
4830 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4831 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4832 intel_display_power_get(dev_priv, power_domain);
4833
4834 edp_panel_vdd_schedule_off(intel_dp);
4835}
4836
Imre Deak6d93c0c2014-07-31 14:03:36 +03004837static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4838{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004839 struct intel_dp *intel_dp;
4840
4841 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4842 return;
4843
4844 intel_dp = enc_to_intel_dp(encoder);
4845
4846 pps_lock(intel_dp);
4847
4848 /*
4849 * Read out the current power sequencer assignment,
4850 * in case the BIOS did something with it.
4851 */
4852 if (IS_VALLEYVIEW(encoder->dev))
4853 vlv_initial_power_sequencer_setup(intel_dp);
4854
4855 intel_edp_panel_vdd_sanitize(intel_dp);
4856
4857 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004858}
4859
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004860static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004861 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004862 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004863 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004864 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004865 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004866 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004867 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004868 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004869 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004870};
4871
4872static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4873 .get_modes = intel_dp_get_modes,
4874 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004875 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004876};
4877
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004878static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004879 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004880 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004881};
4882
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004883enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004884intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4885{
4886 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004887 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004888 struct drm_device *dev = intel_dig_port->base.base.dev;
4889 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004890 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004891 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004892
Dave Airlie0e32b392014-05-02 14:02:48 +10004893 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4894 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004895
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004896 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4897 /*
4898 * vdd off can generate a long pulse on eDP which
4899 * would require vdd on to handle it, and thus we
4900 * would end up in an endless cycle of
4901 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4902 */
4903 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4904 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02004905 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004906 }
4907
Ville Syrjälä26fbb772014-08-11 18:37:37 +03004908 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4909 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10004910 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10004911
Imre Deak1c767b32014-08-18 14:42:42 +03004912 power_domain = intel_display_port_power_domain(intel_encoder);
4913 intel_display_power_get(dev_priv, power_domain);
4914
Dave Airlie0e32b392014-05-02 14:02:48 +10004915 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03004916 /* indicate that we need to restart link training */
4917 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10004918
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004919 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4920 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10004921
4922 if (!intel_dp_get_dpcd(intel_dp)) {
4923 goto mst_fail;
4924 }
4925
4926 intel_dp_probe_oui(intel_dp);
4927
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004928 if (!intel_dp_probe_mst(intel_dp)) {
4929 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4930 intel_dp_check_link_status(intel_dp);
4931 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004932 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004933 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004934 } else {
4935 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03004936 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10004937 goto mst_fail;
4938 }
4939
4940 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10004941 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10004942 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10004943 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004944 }
4945 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004946
4947 ret = IRQ_HANDLED;
4948
Imre Deak1c767b32014-08-18 14:42:42 +03004949 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10004950mst_fail:
4951	/* if we were in MST mode and the device is not there, get out of MST mode */
4952 if (intel_dp->is_mst) {
4953 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4954 intel_dp->is_mst = false;
4955 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4956 }
Imre Deak1c767b32014-08-18 14:42:42 +03004957put_power:
4958 intel_display_power_put(dev_priv, power_domain);
4959
4960 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10004961}
4962
Zhenyu Wange3421a12010-04-08 09:43:27 +08004963/* Return which DP Port should be selected for Transcoder DP control */
4964int
Akshay Joshi0206e352011-08-16 15:34:10 -04004965intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08004966{
4967 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004968 struct intel_encoder *intel_encoder;
4969 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08004970
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004971 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4972 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01004973
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004974 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4975 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01004976 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08004977 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01004978
Zhenyu Wange3421a12010-04-08 09:43:27 +08004979 return -1;
4980}
4981
Rodrigo Vivi477ec322015-08-06 15:51:39 +08004982/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02004983bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08004984{
4985 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03004986 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08004987 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02004988 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08004989 [PORT_B] = DVO_PORT_DPB,
4990 [PORT_C] = DVO_PORT_DPC,
4991 [PORT_D] = DVO_PORT_DPD,
4992 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02004993 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08004994
Ville Syrjälä53ce81a2015-09-11 21:04:38 +03004995 /*
4996	 * eDP is not supported on g4x, so bail out early just
4997	 * for a bit of extra safety in case the VBT is bonkers.
4998 */
4999 if (INTEL_INFO(dev)->gen < 5)
5000 return false;
5001
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005002 if (port == PORT_A)
5003 return true;
5004
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005005 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005006 return false;
5007
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005008 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5009 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005010
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005011 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005012 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5013 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005014 return true;
5015 }
5016 return false;
5017}
5018
Dave Airlie0e32b392014-05-02 14:02:48 +10005019void
Chris Wilsonf6849602010-09-19 09:29:33 +01005020intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5021{
Yuly Novikov53b41832012-10-26 12:04:00 +03005022 struct intel_connector *intel_connector = to_intel_connector(connector);
5023
Chris Wilson3f43c482011-05-12 22:17:24 +01005024 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005025 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005026 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005027
5028 if (is_edp(intel_dp)) {
5029 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005030 drm_object_attach_property(
5031 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005032 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005033 DRM_MODE_SCALE_ASPECT);
5034 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005035 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005036}
5037
Imre Deakdada1a92014-01-29 13:25:41 +02005038static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5039{
5040 intel_dp->last_power_cycle = jiffies;
5041 intel_dp->last_power_on = jiffies;
5042 intel_dp->last_backlight_off = jiffies;
5043}
5044
Daniel Vetter67a54562012-10-20 20:57:45 +02005045static void
5046intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005047 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005048{
5049 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005050 struct edp_power_seq cur, vbt, spec,
5051 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305052 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5053 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005054
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005055 lockdep_assert_held(&dev_priv->pps_mutex);
5056
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005057 /* already initialized? */
5058 if (final->t11_t12 != 0)
5059 return;
5060
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305061 if (IS_BROXTON(dev)) {
5062 /*
5063 * TODO: BXT has 2 sets of PPS registers.
5064		 * The correct register for Broxton needs to be identified
5065		 * using VBT; hardcoding for now.
5066 */
5067 pp_ctrl_reg = BXT_PP_CONTROL(0);
5068 pp_on_reg = BXT_PP_ON_DELAYS(0);
5069 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5070 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005071 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005072 pp_on_reg = PCH_PP_ON_DELAYS;
5073 pp_off_reg = PCH_PP_OFF_DELAYS;
5074 pp_div_reg = PCH_PP_DIVISOR;
5075 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005076 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5077
5078 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5079 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5080 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5081 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005082 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005083
5084 /* Workaround: Need to write PP_CONTROL with the unlock key as
5085 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305086 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005087
Jesse Barnes453c5422013-03-28 09:55:41 -07005088 pp_on = I915_READ(pp_on_reg);
5089 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305090 if (!IS_BROXTON(dev)) {
5091 I915_WRITE(pp_ctrl_reg, pp_ctl);
5092 pp_div = I915_READ(pp_div_reg);
5093 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005094
5095 /* Pull timing values out of registers */
5096 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5097 PANEL_POWER_UP_DELAY_SHIFT;
5098
5099 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5100 PANEL_LIGHT_ON_DELAY_SHIFT;
5101
5102 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5103 PANEL_LIGHT_OFF_DELAY_SHIFT;
5104
5105 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5106 PANEL_POWER_DOWN_DELAY_SHIFT;
5107
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305108 if (IS_BROXTON(dev)) {
5109 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5110 BXT_POWER_CYCLE_DELAY_SHIFT;
5111 if (tmp > 0)
5112 cur.t11_t12 = (tmp - 1) * 1000;
5113 else
5114 cur.t11_t12 = 0;
5115 } else {
5116 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005117 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305118 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005119
5120 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5121 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5122
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005123 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005124
5125 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5126 * our hw here, which are all in 100usec. */
5127 spec.t1_t3 = 210 * 10;
5128 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5129 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5130 spec.t10 = 500 * 10;
5131 /* This one is special and actually in units of 100ms, but zero
5132 * based in the hw (so we need to add 100 ms). But the sw vbt
5133	 * table multiplies it by 1000 to make it in units of 100usec,
5134 * too. */
5135 spec.t11_t12 = (510 + 100) * 10;
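	/*
	 * Worked example of the units above (illustration only): a 510 ms
	 * spec limit plus the 100 ms zero-base, scaled by 10 into 100 usec
	 * units, gives (510 + 100) * 10 = 6100 here; get_delay() below then
	 * converts that back to DIV_ROUND_UP(6100, 10) = 610 ms for
	 * panel_power_cycle_delay.
	 */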
5136
5137 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5138 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5139
5140 /* Use the max of the register settings and vbt. If both are
5141 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005142#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005143 spec.field : \
5144 max(cur.field, vbt.field))
5145 assign_final(t1_t3);
5146 assign_final(t8);
5147 assign_final(t9);
5148 assign_final(t10);
5149 assign_final(t11_t12);
5150#undef assign_final
5151
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005152#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005153 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5154 intel_dp->backlight_on_delay = get_delay(t8);
5155 intel_dp->backlight_off_delay = get_delay(t9);
5156 intel_dp->panel_power_down_delay = get_delay(t10);
5157 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5158#undef get_delay
5159
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005160 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5161 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5162 intel_dp->panel_power_cycle_delay);
5163
5164 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5165 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005166}
5167
5168static void
5169intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005170 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005171{
5172 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005173 u32 pp_on, pp_off, pp_div, port_sel = 0;
5174 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305175 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005176 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005177 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005178
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005179 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005180
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305181 if (IS_BROXTON(dev)) {
5182 /*
5183 * TODO: BXT has 2 sets of PPS registers.
5184		 * The correct register for Broxton needs to be identified
5185		 * using VBT; hardcoding for now.
5186 */
5187 pp_ctrl_reg = BXT_PP_CONTROL(0);
5188 pp_on_reg = BXT_PP_ON_DELAYS(0);
5189 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5190
5191 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005192 pp_on_reg = PCH_PP_ON_DELAYS;
5193 pp_off_reg = PCH_PP_OFF_DELAYS;
5194 pp_div_reg = PCH_PP_DIVISOR;
5195 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005196 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5197
5198 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5199 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5200 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005201 }
5202
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005203 /*
5204 * And finally store the new values in the power sequencer. The
5205 * backlight delays are set to 1 because we do manual waits on them. For
5206 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5207 * we'll end up waiting for the backlight off delay twice: once when we
5208 * do the manual sleep, and once when we disable the panel and wait for
5209 * the PP_STATUS bit to become zero.
5210 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005211 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005212 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5213 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005214 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005215 /* Compute the divisor for the pp clock, simply match the Bspec
5216 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305217 if (IS_BROXTON(dev)) {
5218 pp_div = I915_READ(pp_ctrl_reg);
5219 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5220 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5221 << BXT_POWER_CYCLE_DELAY_SHIFT);
5222 } else {
5223 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5224 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5225 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5226 }
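
	/*
	 * Worked example (illustration only, hypothetical values): with a
	 * 610 ms power cycle delay, seq->t11_t12 is 6100 in 100 usec units,
	 * so the non-Broxton branch writes DIV_ROUND_UP(6100, 1000) = 7 into
	 * the 100 ms PANEL_POWER_CYCLE_DELAY field, and a raw clock value of
	 * div = 125 would put ((100 * 125) / 2 - 1) = 6249 into the
	 * reference divider field.
	 */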
Daniel Vetter67a54562012-10-20 20:57:45 +02005227
5228 /* Haswell doesn't have any port selection bits for the panel
5229 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005230 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005231 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005232 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005233 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005234 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005235 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005236 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005237 }
5238
Jesse Barnes453c5422013-03-28 09:55:41 -07005239 pp_on |= port_sel;
5240
5241 I915_WRITE(pp_on_reg, pp_on);
5242 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305243 if (IS_BROXTON(dev))
5244 I915_WRITE(pp_ctrl_reg, pp_div);
5245 else
5246 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005247
Daniel Vetter67a54562012-10-20 20:57:45 +02005248 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005249 I915_READ(pp_on_reg),
5250 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305251 IS_BROXTON(dev) ?
5252 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005253 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005254}
5255
Vandana Kannanb33a2812015-02-13 15:33:03 +05305256/**
5257 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5258 * @dev: DRM device
5259 * @refresh_rate: RR to be programmed
5260 *
5261 * This function gets called when refresh rate (RR) has to be changed from
5262 * one frequency to another. Switches can be between high and low RR
5263 * supported by the panel or to any other RR based on media playback (in
5264 * this case, RR value needs to be passed from user space).
5265 *
5266 * The caller of this function needs to take a lock on dev_priv->drrs.
5267 * The caller of this function needs to hold dev_priv->drrs.mutex.
Vandana Kannan96178ee2015-01-10 02:25:56 +05305268static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305269{
5270 struct drm_i915_private *dev_priv = dev->dev_private;
5271 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305272 struct intel_digital_port *dig_port = NULL;
5273 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005274 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305275 struct intel_crtc *intel_crtc = NULL;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305276 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305277
5278 if (refresh_rate <= 0) {
5279 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5280 return;
5281 }
5282
Vandana Kannan96178ee2015-01-10 02:25:56 +05305283 if (intel_dp == NULL) {
5284 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305285 return;
5286 }
5287
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005288 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005289 * FIXME: This needs proper synchronization with psr state for some
5290 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005291 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305292
Vandana Kannan96178ee2015-01-10 02:25:56 +05305293 dig_port = dp_to_dig_port(intel_dp);
5294 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005295 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305296
5297 if (!intel_crtc) {
5298 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5299 return;
5300 }
5301
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005302 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305303
Vandana Kannan96178ee2015-01-10 02:25:56 +05305304 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305305 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5306 return;
5307 }
5308
Vandana Kannan96178ee2015-01-10 02:25:56 +05305309 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5310 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305311 index = DRRS_LOW_RR;
5312
Vandana Kannan96178ee2015-01-10 02:25:56 +05305313 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305314 DRM_DEBUG_KMS(
5315 "DRRS requested for previously set RR...ignoring\n");
5316 return;
5317 }
5318
5319 if (!intel_crtc->active) {
5320 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5321 return;
5322 }
5323
Durgadoss R44395bf2015-02-13 15:33:02 +05305324 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305325 switch (index) {
5326 case DRRS_HIGH_RR:
5327 intel_dp_set_m_n(intel_crtc, M1_N1);
5328 break;
5329 case DRRS_LOW_RR:
5330 intel_dp_set_m_n(intel_crtc, M2_N2);
5331 break;
5332 case DRRS_MAX_RR:
5333 default:
5334 DRM_ERROR("Unsupported refreshrate type\n");
5335 }
5336 } else if (INTEL_INFO(dev)->gen > 6) {
Ville Syrjälä649636e2015-09-22 19:50:01 +03005337 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5338 u32 val;
Vandana Kannana4c30b12015-02-13 15:33:00 +05305339
Ville Syrjälä649636e2015-09-22 19:50:01 +03005340 val = I915_READ(reg);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305341 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305342 if (IS_VALLEYVIEW(dev))
5343 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5344 else
5345 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305346 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305347 if (IS_VALLEYVIEW(dev))
5348 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5349 else
5350 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305351 }
5352 I915_WRITE(reg, val);
5353 }
5354
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305355 dev_priv->drrs.refresh_rate_type = index;
5356
5357 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5358}
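
/*
 * Illustrative sketch only (not part of the driver): callers of
 * intel_dp_set_drrs_state() in this file follow roughly the pattern below,
 * holding dev_priv->drrs.mutex around the check and the switch as required
 * by the kernel-doc above. The wrapper name is hypothetical.
 */
static void __maybe_unused example_drrs_switch_to_low_rr(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (intel_dp && dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}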
5359
Vandana Kannanb33a2812015-02-13 15:33:03 +05305360/**
5361 * intel_edp_drrs_enable - init drrs struct if supported
5362 * @intel_dp: DP struct
5363 *
5364 * Initializes frontbuffer_bits and drrs.dp
5365 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305366void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5367{
5368 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5369 struct drm_i915_private *dev_priv = dev->dev_private;
5370 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5371 struct drm_crtc *crtc = dig_port->base.base.crtc;
5372 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5373
5374 if (!intel_crtc->config->has_drrs) {
5375 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5376 return;
5377 }
5378
5379 mutex_lock(&dev_priv->drrs.mutex);
5380 if (WARN_ON(dev_priv->drrs.dp)) {
5381 DRM_ERROR("DRRS already enabled\n");
5382 goto unlock;
5383 }
5384
5385 dev_priv->drrs.busy_frontbuffer_bits = 0;
5386
5387 dev_priv->drrs.dp = intel_dp;
5388
5389unlock:
5390 mutex_unlock(&dev_priv->drrs.mutex);
5391}
5392
Vandana Kannanb33a2812015-02-13 15:33:03 +05305393/**
5394 * intel_edp_drrs_disable - Disable DRRS
5395 * @intel_dp: DP struct
5396 *
5397 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305398void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5399{
5400 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5401 struct drm_i915_private *dev_priv = dev->dev_private;
5402 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5403 struct drm_crtc *crtc = dig_port->base.base.crtc;
5404 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5405
5406 if (!intel_crtc->config->has_drrs)
5407 return;
5408
5409 mutex_lock(&dev_priv->drrs.mutex);
5410 if (!dev_priv->drrs.dp) {
5411 mutex_unlock(&dev_priv->drrs.mutex);
5412 return;
5413 }
5414
5415 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5416 intel_dp_set_drrs_state(dev_priv->dev,
5417 intel_dp->attached_connector->panel.
5418 fixed_mode->vrefresh);
5419
5420 dev_priv->drrs.dp = NULL;
5421 mutex_unlock(&dev_priv->drrs.mutex);
5422
5423 cancel_delayed_work_sync(&dev_priv->drrs.work);
5424}
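
/*
 * Illustrative sketch only (not part of the driver): the expected pairing
 * of the two helpers above, assuming a hypothetical eDP modeset path where
 * the encoder's crtc is active and intel_crtc->config->has_drrs is set.
 */
static void __maybe_unused example_edp_drrs_toggle(struct intel_dp *intel_dp)
{
	/* after the pipe is up: start tracking this eDP panel for DRRS */
	intel_edp_drrs_enable(intel_dp);

	/* ... frontbuffer activity drives RR switching in between ... */

	/* before the pipe goes down: restore high RR and stop tracking */
	intel_edp_drrs_disable(intel_dp);
}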
5425
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305426static void intel_edp_drrs_downclock_work(struct work_struct *work)
5427{
5428 struct drm_i915_private *dev_priv =
5429 container_of(work, typeof(*dev_priv), drrs.work.work);
5430 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305431
Vandana Kannan96178ee2015-01-10 02:25:56 +05305432 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305433
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305434 intel_dp = dev_priv->drrs.dp;
5435
5436 if (!intel_dp)
5437 goto unlock;
5438
5439 /*
5440 * The delayed work can race with an invalidate hence we need to
5441 * recheck.
5442 */
5443
5444 if (dev_priv->drrs.busy_frontbuffer_bits)
5445 goto unlock;
5446
5447 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5448 intel_dp_set_drrs_state(dev_priv->dev,
5449 intel_dp->attached_connector->panel.
5450 downclock_mode->vrefresh);
5451
5452unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305453 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305454}
5455
Vandana Kannanb33a2812015-02-13 15:33:03 +05305456/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305457 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305458 * @dev: DRM device
5459 * @frontbuffer_bits: frontbuffer plane tracking bits
5460 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305461 * This function gets called every time rendering on the given planes starts.
5462 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305463 *
5464 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5465 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305466void intel_edp_drrs_invalidate(struct drm_device *dev,
5467 unsigned frontbuffer_bits)
5468{
5469 struct drm_i915_private *dev_priv = dev->dev_private;
5470 struct drm_crtc *crtc;
5471 enum pipe pipe;
5472
Daniel Vetter9da7d692015-04-09 16:44:15 +02005473 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305474 return;
5475
Daniel Vetter88f933a2015-04-09 16:44:16 +02005476 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305477
Vandana Kannana93fad02015-01-10 02:25:59 +05305478 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005479 if (!dev_priv->drrs.dp) {
5480 mutex_unlock(&dev_priv->drrs.mutex);
5481 return;
5482 }
5483
Vandana Kannana93fad02015-01-10 02:25:59 +05305484 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5485 pipe = to_intel_crtc(crtc)->pipe;
5486
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005487 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5488 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5489
Ramalingam C0ddfd202015-06-15 20:50:05 +05305490 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005491 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305492 intel_dp_set_drrs_state(dev_priv->dev,
5493 dev_priv->drrs.dp->attached_connector->panel.
5494 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305495
Vandana Kannana93fad02015-01-10 02:25:59 +05305496 mutex_unlock(&dev_priv->drrs.mutex);
5497}
5498
Vandana Kannanb33a2812015-02-13 15:33:03 +05305499/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305500 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305501 * @dev: DRM device
5502 * @frontbuffer_bits: frontbuffer plane tracking bits
5503 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305504 * This function gets called every time rendering on the given planes has
5505 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5506 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
5507 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305508 *
5509 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5510 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305511void intel_edp_drrs_flush(struct drm_device *dev,
5512 unsigned frontbuffer_bits)
5513{
5514 struct drm_i915_private *dev_priv = dev->dev_private;
5515 struct drm_crtc *crtc;
5516 enum pipe pipe;
5517
Daniel Vetter9da7d692015-04-09 16:44:15 +02005518 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305519 return;
5520
Daniel Vetter88f933a2015-04-09 16:44:16 +02005521 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305522
Vandana Kannana93fad02015-01-10 02:25:59 +05305523 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005524 if (!dev_priv->drrs.dp) {
5525 mutex_unlock(&dev_priv->drrs.mutex);
5526 return;
5527 }
5528
Vandana Kannana93fad02015-01-10 02:25:59 +05305529 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5530 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005531
5532 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305533 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5534
Ramalingam C0ddfd202015-06-15 20:50:05 +05305535 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005536 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305537 intel_dp_set_drrs_state(dev_priv->dev,
5538 dev_priv->drrs.dp->attached_connector->panel.
5539 fixed_mode->vrefresh);
5540
5541 /*
5542 * flush also means no more activity hence schedule downclock, if all
5543 * other fbs are quiescent too
5544 */
5545 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305546 schedule_delayed_work(&dev_priv->drrs.work,
5547 msecs_to_jiffies(1000));
5548 mutex_unlock(&dev_priv->drrs.mutex);
5549}
5550
Vandana Kannanb33a2812015-02-13 15:33:03 +05305551/**
5552 * DOC: Display Refresh Rate Switching (DRRS)
5553 *
5554 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5555 * which enables switching between low and high refresh rates,
5556 * dynamically, based on the usage scenario. This feature is applicable
5557 * for internal panels.
5558 *
5559 * Indication that the panel supports DRRS is given by the panel EDID, which
5560 * would list multiple refresh rates for one resolution.
5561 *
5562 * DRRS is of 2 types - static and seamless.
5563 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5564 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5565 * Seamless DRRS involves changing RR without any visual effect to the user
5566 * and can be used during normal system usage. This is done by programming
5567 * certain registers.
5568 *
5569 * Support for static/seamless DRRS may be indicated in the VBT based on
5570 * inputs from the panel spec.
5571 *
5572 * DRRS saves power by switching to low RR based on usage scenarios.
5573 *
5574 * eDP DRRS:-
5575 * The implementation is based on frontbuffer tracking implementation.
5576 * When there is a disturbance on the screen triggered by user activity or a
5577 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5578 * When there is no movement on screen, after a timeout of 1 second, a switch
5579 * to low RR is made.
5580 * For integration with frontbuffer tracking code,
5581 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5582 *
5583 * DRRS can be further extended to support other internal panels and also
5584 * the scenario of video playback wherein RR is set based on the rate
5585 * requested by userspace.
5586 */
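
/*
 * Illustrative sketch only (not part of the driver): the frontbuffer
 * tracking code is expected to bracket rendering roughly as shown below.
 * The helper name and the way frontbuffer_bits are obtained are
 * assumptions made for the example.
 */
static void __maybe_unused example_drrs_frontbuffer_usage(struct drm_device *dev,
							   unsigned frontbuffer_bits)
{
	/* rendering is about to dirty these planes: force the high RR */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... CPU writes, GPU rendering or a flip happen here ... */

	/* rendering done: after 1 s of idleness the work item downclocks */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}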
5587
5588/**
5589 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5590 * @intel_connector: eDP connector
5591 * @fixed_mode: preferred mode of panel
5592 *
5593 * This function is called only once at driver load to initialize basic
5594 * DRRS stuff.
5595 *
5596 * Returns:
5597 * Downclock mode if panel supports it, else return NULL.
5598 * DRRS support is determined by the presence of downclock mode (apart
5599 * from VBT setting).
5600 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305601static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305602intel_dp_drrs_init(struct intel_connector *intel_connector,
5603 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305604{
5605 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305606 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305607 struct drm_i915_private *dev_priv = dev->dev_private;
5608 struct drm_display_mode *downclock_mode = NULL;
5609
Daniel Vetter9da7d692015-04-09 16:44:15 +02005610 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5611 mutex_init(&dev_priv->drrs.mutex);
5612
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305613 if (INTEL_INFO(dev)->gen <= 6) {
5614 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5615 return NULL;
5616 }
5617
5618 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005619 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305620 return NULL;
5621 }
5622
5623 downclock_mode = intel_find_panel_downclock
5624 (dev, fixed_mode, connector);
5625
5626 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305627 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305628 return NULL;
5629 }
5630
Vandana Kannan96178ee2015-01-10 02:25:56 +05305631 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305632
Vandana Kannan96178ee2015-01-10 02:25:56 +05305633 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005634 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305635 return downclock_mode;
5636}
5637
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005638static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005639 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005640{
5641 struct drm_connector *connector = &intel_connector->base;
5642 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005643 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5644 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005645 struct drm_i915_private *dev_priv = dev->dev_private;
5646 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305647 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005648 bool has_dpcd;
5649 struct drm_display_mode *scan;
5650 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005651 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005652
5653 if (!is_edp(intel_dp))
5654 return true;
5655
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005656 pps_lock(intel_dp);
5657 intel_edp_panel_vdd_sanitize(intel_dp);
5658 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005659
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005660 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005661 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005662
5663 if (has_dpcd) {
5664 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5665 dev_priv->no_aux_handshake =
5666 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5667 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5668 } else {
5669 /* if this fails, presume the device is a ghost */
5670 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005671 return false;
5672 }
5673
5674 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005675 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005676 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005677 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005678
Daniel Vetter060c8772014-03-21 23:22:35 +01005679 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005680 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005681 if (edid) {
5682 if (drm_add_edid_modes(connector, edid)) {
5683 drm_mode_connector_update_edid_property(connector,
5684 edid);
5685 drm_edid_to_eld(connector, edid);
5686 } else {
5687 kfree(edid);
5688 edid = ERR_PTR(-EINVAL);
5689 }
5690 } else {
5691 edid = ERR_PTR(-ENOENT);
5692 }
5693 intel_connector->edid = edid;
5694
5695 /* prefer fixed mode from EDID if available */
5696 list_for_each_entry(scan, &connector->probed_modes, head) {
5697 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5698 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305699 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305700 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005701 break;
5702 }
5703 }
5704
5705 /* fallback to VBT if available for eDP */
5706 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5707 fixed_mode = drm_mode_duplicate(dev,
5708 dev_priv->vbt.lfp_lvds_vbt_mode);
5709 if (fixed_mode)
5710 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5711 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005712 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005713
Clint Taylor01527b32014-07-07 13:01:46 -07005714 if (IS_VALLEYVIEW(dev)) {
5715 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5716 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005717
5718 /*
5719 * Figure out the current pipe for the initial backlight setup.
5720 * If the current pipe isn't valid, try the PPS pipe, and if that
5721 * fails just assume pipe A.
5722 */
5723 if (IS_CHERRYVIEW(dev))
5724 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5725 else
5726 pipe = PORT_TO_PIPE(intel_dp->DP);
5727
5728 if (pipe != PIPE_A && pipe != PIPE_B)
5729 pipe = intel_dp->pps_pipe;
5730
5731 if (pipe != PIPE_A && pipe != PIPE_B)
5732 pipe = PIPE_A;
5733
5734 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5735 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005736 }
5737
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305738 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula5507fae2015-09-14 14:03:48 +03005739 intel_connector->panel.backlight.power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005740 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005741
5742 return true;
5743}
5744
Paulo Zanoni16c25532013-06-12 17:27:25 -03005745bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005746intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5747 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005748{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005749 struct drm_connector *connector = &intel_connector->base;
5750 struct intel_dp *intel_dp = &intel_dig_port->dp;
5751 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5752 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005753 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005754 enum port port = intel_dig_port->port;
Jani Nikula0b998362014-03-14 16:51:17 +02005755 int type;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005756
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005757 intel_dp->pps_pipe = INVALID_PIPE;
5758
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005759 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005760 if (INTEL_INFO(dev)->gen >= 9)
5761 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5762 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005763 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5764 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5765 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5766 else if (HAS_PCH_SPLIT(dev))
5767 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5768 else
5769 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5770
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005771 if (INTEL_INFO(dev)->gen >= 9)
5772 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5773 else
5774 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005775
Ander Conselvan de Oliveiraad642172015-10-23 13:01:49 +03005776 if (HAS_DDI(dev))
5777 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5778
Daniel Vetter07679352012-09-06 22:15:42 +02005779 /* Preserve the current hw state. */
5780 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005781 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005782
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005783 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305784 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005785 else
5786 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005787
Imre Deakf7d24902013-05-08 13:14:05 +03005788 /*
5789 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5790 * for DP the encoder type can be set by the caller to
5791 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5792 */
5793 if (type == DRM_MODE_CONNECTOR_eDP)
5794 intel_encoder->type = INTEL_OUTPUT_EDP;
5795
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005796 /* eDP only on port B and/or C on vlv/chv */
5797 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5798 port != PORT_B && port != PORT_C))
5799 return false;
5800
Imre Deake7281ea2013-05-08 13:14:08 +03005801 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5802 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5803 port_name(port));
5804
Adam Jacksonb3295302010-07-16 14:46:28 -04005805 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005806 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5807
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005808 connector->interlace_allowed = true;
5809 connector->doublescan_allowed = 0;
Ma Lingf8aed702009-08-24 13:50:24 +08005810
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
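		/* BXT A0/A1 steppings use the port A HPD pin for this port. */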
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

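	/*
	 * eDP needs its panel power sequencer state set up (under pps_lock)
	 * before the first AUX transaction touches the panel.
	 */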
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

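	/*
	 * intel_edp_init_connector() fails when the eDP panel can't be
	 * probed; unwind everything registered so far in that case.
	 */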
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

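	/*
	 * Encoder hooks: the enable/disable sequencing differs between
	 * CHV, VLV and the older g4x/PCH platforms.
	 */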
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
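	/*
	 * On CHV only pipe C can drive port D and pipes A/B drive the other
	 * ports; elsewhere any of the (up to) three pipes will do.
	 */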
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

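	/*
	 * Register this port for hotplug IRQ handling so long/short HPD
	 * pulses are routed to intel_dp_hpd_pulse().
	 */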
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}

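/*
 * Quiesce the per-port MST topology managers on suspend so no sideband
 * traffic is attempted while the hardware is powered down.
 */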
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

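/*
 * Re-arm the MST topology managers on resume; if a manager fails to resume
 * (e.g. the branch device went away across suspend), re-check the MST status
 * so the stale topology gets cleaned up.
 */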
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
		}
	}
}