/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * The table below only provides the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
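
/*
 * Illustrative decode of the fixed-point m2 values above (derived from the
 * (m2_int << 22) | m2_fraction formula, shown here only for clarity):
 * 0x819999a >> 22 = 32 with fraction 0x819999a & 0x3fffff = 1677722,
 * i.e. m2 ~= 32.4, while 0x6c00000 >> 22 = 27 with a zero fraction.
 */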

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
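
/*
 * Example (for illustration only): with lane_count == 2 the used lanes are
 * 0-1, so the expression above returns ~0x3 & 0xf = 0xc, i.e. a mask of the
 * unused lanes 2 and 3.
 */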

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
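
/*
 * Worked example matching the comment above (illustrative only): 1680x1050R
 * has ->clock == 119000; at 18bpp that is (119000 * 18 + 9) / 10 == 214200
 * decakilobits per second, which fits in the 216000 one 2.7GHz lane provides.
 */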

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
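
/*
 * Example (illustrative): intel_dp_pack_aux((uint8_t[]){ 0x12, 0x34 }, 2)
 * returns 0x12340000 - bytes are packed MSB-first into the 32-bit AUX data
 * register, and intel_dp_unpack_aux() below performs the inverse.
 */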

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * Only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
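
/*
 * Illustrative header layout built by intel_dp_aux_transfer() above: a
 * one-byte DP_AUX_NATIVE_READ (request 0x9) of DPCD address 0x00202 yields
 * txbuf[] = { 0x90, 0x02, 0x02, 0x00 } - the request in the high nibble of
 * byte 0, the 20-bit address split across bytes 0-2, and size - 1 in byte 3.
 */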

static uint32_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static uint32_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}

static uint32_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}

static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	if (INTEL_INFO(dev_priv)->gen >= 9)
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg(dev_priv, port);
	else
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg(dev_priv, port);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/*
	 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	 * results in a CDCLK change. Need to handle the change of CDCLK by
	 * disabling pipes and re-enabling them.
	 */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
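
/*
 * Note on the port_clock / 2 switch above (illustrative only): the
 * DPLL_CTRL1 link-rate values are named after half the port clock, so e.g.
 * RBR (port_clock 162000 kHz) hits the 81000 case / DPLL_CTRL1_LINK_RATE_810
 * and HBR2 (540000 kHz) hits the 270000 case / DPLL_CTRL1_LINK_RATE_2700.
 */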

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
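
/*
 * Example of the legacy fallback above (illustrative): DP_MAX_LINK_RATE
 * values 0x06/0x0a/0x14 (1.62/2.7/5.4 Gbps) give (bw >> 3) + 1 = 1/2/3,
 * i.e. the first one, two or three entries of default_rates[].
 */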
1226
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001227bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301228{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001229 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1230 struct drm_device *dev = dig_port->base.base.dev;
1231
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301232 /* WaDisableHBR2:skl */
Jani Nikulae87a0052015-10-20 15:22:02 +03001233 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301234 return false;
1235
1236 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1237 (INTEL_INFO(dev)->gen >= 9))
1238 return true;
1239 else
1240 return false;
1241}
1242
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301243static int
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001244intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301245{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001246 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1247 struct drm_device *dev = dig_port->base.base.dev;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301248 int size;
1249
Sonika Jindal64987fc2015-05-26 17:50:13 +05301250 if (IS_BROXTON(dev)) {
1251 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301252 size = ARRAY_SIZE(bxt_rates);
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001253 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301254 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301255 size = ARRAY_SIZE(skl_rates);
1256 } else {
1257 *source_rates = default_rates;
1258 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301259 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001260
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301261	/* This depends on the fact that 5.4 is the last value in the array */
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001262 if (!intel_dp_source_supports_hbr2(intel_dp))
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301263 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001264
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301265 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301266}
1267
Daniel Vetter0e503382014-07-04 11:26:04 -03001268static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001269intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001270 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001271{
1272 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001273 const struct dp_link_dpll *divisor = NULL;
1274 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001275
1276 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001277 divisor = gen4_dpll;
1278 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001279 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001280 divisor = pch_dpll;
1281 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001282 } else if (IS_CHERRYVIEW(dev)) {
1283 divisor = chv_dpll;
1284 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001285 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001286 divisor = vlv_dpll;
1287 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001288 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001289
1290 if (divisor && count) {
1291 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001292 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001293 pipe_config->dpll = divisor[i].dpll;
1294 pipe_config->clock_set = true;
1295 break;
1296 }
1297 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001298 }
1299}
1300
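/*
 * Two-pointer merge of two ascending-sorted rate arrays: write each rate
 * present in both into common_rates (at most DP_MAX_SUPPORTED_RATES entries)
 * and return how many were found.
 *
 * Illustrative example: source = { 162000, 270000, 540000 } and
 * sink = { 162000, 270000 } gives common_rates = { 162000, 270000 }
 * and a return value of 2.
 */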
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001301static int intersect_rates(const int *source_rates, int source_len,
1302 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001303 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301304{
1305 int i = 0, j = 0, k = 0;
1306
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301307 while (i < source_len && j < sink_len) {
1308 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001309 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1310 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001311 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301312 ++k;
1313 ++i;
1314 ++j;
1315 } else if (source_rates[i] < sink_rates[j]) {
1316 ++i;
1317 } else {
1318 ++j;
1319 }
1320 }
1321 return k;
1322}
1323
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001324static int intel_dp_common_rates(struct intel_dp *intel_dp,
1325 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001326{
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001327 const int *source_rates, *sink_rates;
1328 int source_len, sink_len;
1329
1330 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001331 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001332
1333 return intersect_rates(source_rates, source_len,
1334 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001335 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001336}
1337
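/* Format @nelem entries of @array as a comma-separated list, silently
 * truncating once @str is full. */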
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001338static void snprintf_int_array(char *str, size_t len,
1339 const int *array, int nelem)
1340{
1341 int i;
1342
1343 str[0] = '\0';
1344
1345 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001346 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001347 if (r >= len)
1348 return;
1349 str += r;
1350 len -= r;
1351 }
1352}
1353
1354static void intel_dp_print_rates(struct intel_dp *intel_dp)
1355{
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001356 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001357 int source_len, sink_len, common_len;
1358 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001359 char str[128]; /* FIXME: too big for stack? */
1360
1361 if ((drm_debug & DRM_UT_KMS) == 0)
1362 return;
1363
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001364 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001365 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1366 DRM_DEBUG_KMS("source rates: %s\n", str);
1367
1368 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1369 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1370 DRM_DEBUG_KMS("sink rates: %s\n", str);
1371
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001372 common_len = intel_dp_common_rates(intel_dp, common_rates);
1373 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1374 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001375}
1376
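/*
 * Return the index of @find in @rates, or DP_MAX_SUPPORTED_RATES if it is
 * not present. intel_dp_max_link_rate() relies on rate_to_index(0, rates)
 * returning the index of the first unused (zero) slot, i.e. the number of
 * populated entries.
 */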
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001377static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301378{
1379 int i = 0;
1380
1381 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1382 if (find == rates[i])
1383 break;
1384
1385 return i;
1386}
1387
Ville Syrjälä50fec212015-03-12 17:10:34 +02001388int
1389intel_dp_max_link_rate(struct intel_dp *intel_dp)
1390{
1391 int rates[DP_MAX_SUPPORTED_RATES] = {};
1392 int len;
1393
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001394 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001395 if (WARN_ON(len <= 0))
1396 return 162000;
1397
1398 return rates[rate_to_index(0, rates) - 1];
1399}
1400
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001401int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1402{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001403 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001404}
1405
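/*
 * Sinks that expose a rate table are programmed with a rate_select index
 * and link_bw == 0; legacy sinks get the classic link BW code instead.
 */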
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03001406void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1407 uint8_t *link_bw, uint8_t *rate_select)
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001408{
1409 if (intel_dp->num_sink_rates) {
1410 *link_bw = 0;
1411 *rate_select =
1412 intel_dp_rate_select(intel_dp, port_clock);
1413 } else {
1414 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1415 *rate_select = 0;
1416 }
1417}
1418
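/*
 * Link configuration search: starting from the requested pipe bpp (clamped
 * for eDP panels below), step bpp down and scan link rate and lane count
 * until the first combination with enough bandwidth for the mode is found.
 */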
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001419bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001420intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001421 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001422{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001423 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001424 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001425 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001426 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001427 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001428 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001429 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001430 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001431 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001432 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001433	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001434 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301435 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001436 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001437 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001438 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1439 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001440 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301441
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001442 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301443
1444 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001445 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301446
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001447 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001448
Imre Deakbc7d38a2013-05-16 14:40:36 +03001449 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001450 pipe_config->has_pch_encoder = true;
1451
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001452 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001453 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001454 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001455
Jani Nikuladd06f902012-10-19 14:51:50 +03001456 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1457 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1458 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001459
1460 if (INTEL_INFO(dev)->gen >= 9) {
1461 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001462 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001463 if (ret)
1464				return false;
1465 }
1466
Matt Roperb56676272015-11-04 09:05:27 -08001467 if (HAS_GMCH_DISPLAY(dev))
Jesse Barnes2dd24552013-04-25 12:55:01 -07001468 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1469 intel_connector->panel.fitting_mode);
1470 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001471 intel_pch_panel_fitting(intel_crtc, pipe_config,
1472 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001473 }
1474
Daniel Vettercb1793c2012-06-04 18:39:21 +02001475 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001476 return false;
1477
Daniel Vetter083f9562012-04-20 20:23:49 +02001478 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301479 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001480 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001481 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001482
Daniel Vetter36008362013-03-27 00:44:59 +01001483 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1484	 * bpc (6 bpp) in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001485 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001486 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301487
1488		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1489 if (intel_connector->base.display_info.bpc == 0 &&
1490 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001491 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1492 dev_priv->vbt.edp_bpp);
1493 bpp = dev_priv->vbt.edp_bpp;
1494 }
1495
Jani Nikula344c5bb2014-09-09 11:25:13 +03001496 /*
1497 * Use the maximum clock and number of lanes the eDP panel
1498	 * advertises being capable of. The panels are generally
1499 * designed to support only a single clock and lane
1500 * configuration, and typically these values correspond to the
1501 * native resolution of the panel.
1502 */
1503 min_lane_count = max_lane_count;
1504 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001505 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001506
Daniel Vetter36008362013-03-27 00:44:59 +01001507 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001508 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1509 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001510
Dave Airliec6930992014-07-14 11:04:39 +10001511 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301512 for (lane_count = min_lane_count;
1513 lane_count <= max_lane_count;
1514 lane_count <<= 1) {
1515
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001516 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001517 link_avail = intel_dp_max_data_rate(link_clock,
1518 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001519
Daniel Vetter36008362013-03-27 00:44:59 +01001520 if (mode_rate <= link_avail) {
1521 goto found;
1522 }
1523 }
1524 }
1525 }
1526
1527 return false;
1528
1529found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001530 if (intel_dp->color_range_auto) {
1531 /*
1532 * See:
1533 * CEA-861-E - 5.1 Default Encoding Parameters
1534 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1535 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001536 pipe_config->limited_color_range =
1537 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1538 } else {
1539 pipe_config->limited_color_range =
1540 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001541 }
1542
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001543 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301544
Daniel Vetter657445f2013-05-04 10:09:18 +02001545 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001546 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001547
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001548 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1549 &link_bw, &rate_select);
1550
1551 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1552 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001553 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001554 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1555 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001556
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001557 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001558 adjusted_mode->crtc_clock,
1559 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001560 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001561
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301562 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301563 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001564 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301565 intel_link_compute_m_n(bpp, lane_count,
1566 intel_connector->panel.downclock_mode->clock,
1567 pipe_config->port_clock,
1568 &pipe_config->dp_m2_n2);
1569 }
1570
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001571 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001572 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301573 else if (IS_BROXTON(dev))
1574 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001575 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001576 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001577 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001578 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001579
Daniel Vetter36008362013-03-27 00:44:59 +01001580 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001581}
1582
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001583void intel_dp_set_link_params(struct intel_dp *intel_dp,
1584 const struct intel_crtc_state *pipe_config)
1585{
1586 intel_dp->link_rate = pipe_config->port_clock;
1587 intel_dp->lane_count = pipe_config->lane_count;
1588}
1589
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001590static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001591{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001592 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001593 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001594 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001595 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001596 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03001597 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001598
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001599 intel_dp_set_link_params(intel_dp, crtc->config);
1600
Keith Packard417e8222011-11-01 19:54:11 -07001601 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001602 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001603 *
1604 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001605 * SNB CPU
1606 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001607 * CPT PCH
1608 *
1609 * IBX PCH and CPU are the same for almost everything,
1610 * except that the CPU DP PLL is configured in this
1611 * register
1612 *
1613 * CPT PCH is quite different, having many bits moved
1614 * to the TRANS_DP_CTL register instead. That
1615 * configuration happens (oddly) in ironlake_pch_enable
1616 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001617
Keith Packard417e8222011-11-01 19:54:11 -07001618 /* Preserve the BIOS-computed detected bit. This is
1619 * supposed to be read-only.
1620 */
1621 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001622
Keith Packard417e8222011-11-01 19:54:11 -07001623 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001624 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001625 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001626
Keith Packard417e8222011-11-01 19:54:11 -07001627 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001628
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001629 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001630 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1631 intel_dp->DP |= DP_SYNC_HS_HIGH;
1632 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1633 intel_dp->DP |= DP_SYNC_VS_HIGH;
1634 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1635
Jani Nikula6aba5b62013-10-04 15:08:10 +03001636 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001637 intel_dp->DP |= DP_ENHANCED_FRAMING;
1638
Daniel Vetter7c62a162013-06-01 17:16:20 +02001639 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001640 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001641 u32 trans_dp;
1642
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001643 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001644
1645 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1646 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1647 trans_dp |= TRANS_DP_ENH_FRAMING;
1648 else
1649 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1650 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001651 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001652 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1653 crtc->config->limited_color_range)
1654 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001655
1656 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1657 intel_dp->DP |= DP_SYNC_HS_HIGH;
1658 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1659 intel_dp->DP |= DP_SYNC_VS_HIGH;
1660 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1661
Jani Nikula6aba5b62013-10-04 15:08:10 +03001662 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001663 intel_dp->DP |= DP_ENHANCED_FRAMING;
1664
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001665 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001666 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001667 else if (crtc->pipe == PIPE_B)
1668 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001669 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001670}
1671
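/* Panel power sequencer status mask/value pairs polled via wait_panel_status(). */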
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001672#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1673#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001674
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001675#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1676#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001677
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001678#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1679#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001680
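/*
 * Poll the panel power sequencer status register until (status & mask) ==
 * value, logging an error if that state is not reached within 5 seconds.
 */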
Daniel Vetter4be73782014-01-17 14:39:48 +01001681static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001682 u32 mask,
1683 u32 value)
1684{
Paulo Zanoni30add222012-10-26 19:05:45 -02001685 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001686 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001687 u32 pp_stat_reg, pp_ctrl_reg;
1688
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001689 lockdep_assert_held(&dev_priv->pps_mutex);
1690
Jani Nikulabf13e812013-09-06 07:40:05 +03001691 pp_stat_reg = _pp_stat_reg(intel_dp);
1692 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001693
1694 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001695 mask, value,
1696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001698
Jesse Barnes453c5422013-03-28 09:55:41 -07001699 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001700 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001701 I915_READ(pp_stat_reg),
1702 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001703 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001704
1705 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001706}
1707
Daniel Vetter4be73782014-01-17 14:39:48 +01001708static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001709{
1710 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001711 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001712}
1713
Daniel Vetter4be73782014-01-17 14:39:48 +01001714static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001715{
Keith Packardbd943152011-09-18 23:09:52 -07001716 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001717 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001718}
Keith Packardbd943152011-09-18 23:09:52 -07001719
Daniel Vetter4be73782014-01-17 14:39:48 +01001720static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001721{
1722 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001723
1724	/* When the VDD override bit is the last thing we disable, we have to
1725	 * do the wait manually. */
1726 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1727 intel_dp->panel_power_cycle_delay);
1728
Daniel Vetter4be73782014-01-17 14:39:48 +01001729 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001730}
Keith Packardbd943152011-09-18 23:09:52 -07001731
Daniel Vetter4be73782014-01-17 14:39:48 +01001732static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001733{
1734 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1735 intel_dp->backlight_on_delay);
1736}
1737
Daniel Vetter4be73782014-01-17 14:39:48 +01001738static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001739{
1740 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1741 intel_dp->backlight_off_delay);
1742}
Keith Packard99ea7122011-11-01 19:57:50 -07001743
Keith Packard832dd3c2011-11-01 19:34:06 -07001744/* Read the current pp_control value, unlocking the register if it
1745 * is locked
1746 */
1747
Jesse Barnes453c5422013-03-28 09:55:41 -07001748static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001749{
Jesse Barnes453c5422013-03-28 09:55:41 -07001750 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1751 struct drm_i915_private *dev_priv = dev->dev_private;
1752 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001753
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001754 lockdep_assert_held(&dev_priv->pps_mutex);
1755
Jani Nikulabf13e812013-09-06 07:40:05 +03001756 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301757 if (!IS_BROXTON(dev)) {
1758 control &= ~PANEL_UNLOCK_MASK;
1759 control |= PANEL_UNLOCK_REGS;
1760 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001761 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001762}
1763
Ville Syrjälä951468f2014-09-04 14:55:31 +03001764/*
1765 * Must be paired with edp_panel_vdd_off().
1766 * Must hold pps_mutex around the whole on/off sequence.
1767 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1768 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001769static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001770{
Paulo Zanoni30add222012-10-26 19:05:45 -02001771 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001772 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1773 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001774 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001775 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001776 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001777 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001778 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001779
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001780 lockdep_assert_held(&dev_priv->pps_mutex);
1781
Keith Packard97af61f572011-09-28 16:23:51 -07001782 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001783 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001784
Egbert Eich2c623c12014-11-25 12:54:57 +01001785 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001786 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001787
Daniel Vetter4be73782014-01-17 14:39:48 +01001788 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001789 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001790
Imre Deak4e6e1a52014-03-27 17:45:11 +02001791 power_domain = intel_display_port_power_domain(intel_encoder);
1792 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001793
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001794 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1795 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001796
Daniel Vetter4be73782014-01-17 14:39:48 +01001797 if (!edp_have_panel_power(intel_dp))
1798 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001799
Jesse Barnes453c5422013-03-28 09:55:41 -07001800 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001801 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001802
Jani Nikulabf13e812013-09-06 07:40:05 +03001803 pp_stat_reg = _pp_stat_reg(intel_dp);
1804 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001805
1806 I915_WRITE(pp_ctrl_reg, pp);
1807 POSTING_READ(pp_ctrl_reg);
1808 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1809 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001810 /*
1811 * If the panel wasn't on, delay before accessing aux channel
1812 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001813 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001814 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1815 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001816 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001817 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001818
1819 return need_to_disable;
1820}
1821
Ville Syrjälä951468f2014-09-04 14:55:31 +03001822/*
1823 * Must be paired with intel_edp_panel_vdd_off() or
1824 * intel_edp_panel_off().
1825 * Nested calls to these functions are not allowed since
1826 * we drop the lock. Caller must use some higher level
1827 * locking to prevent nested calls from other threads.
1828 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001829void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001830{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001831 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001832
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001833 if (!is_edp(intel_dp))
1834 return;
1835
Ville Syrjälä773538e82014-09-04 14:54:56 +03001836 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001837 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001838 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001839
Rob Clarke2c719b2014-12-15 13:56:32 -05001840 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001841 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001842}
1843
Daniel Vetter4be73782014-01-17 14:39:48 +01001844static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001845{
Paulo Zanoni30add222012-10-26 19:05:45 -02001846 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001847 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001848 struct intel_digital_port *intel_dig_port =
1849 dp_to_dig_port(intel_dp);
1850 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1851 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001852 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001853 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001854
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001855 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001856
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001857 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001858
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001859 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001860 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001861
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001862 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1863 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001864
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001865 pp = ironlake_get_pp_control(intel_dp);
1866 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001867
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001868 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1869 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001870
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001871 I915_WRITE(pp_ctrl_reg, pp);
1872 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001873
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001874 /* Make sure sequencer is idle before allowing subsequent activity */
1875 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1876 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001877
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001878 if ((pp & POWER_TARGET_ON) == 0)
1879 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001880
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001881 power_domain = intel_display_port_power_domain(intel_encoder);
1882 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001883}
1884
Daniel Vetter4be73782014-01-17 14:39:48 +01001885static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001886{
1887 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1888 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001889
Ville Syrjälä773538e82014-09-04 14:54:56 +03001890 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001891 if (!intel_dp->want_panel_vdd)
1892 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001893 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001894}
1895
Imre Deakaba86892014-07-30 15:57:31 +03001896static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1897{
1898 unsigned long delay;
1899
1900 /*
1901 * Queue the timer to fire a long time from now (relative to the power
1902 * down delay) to keep the panel power up across a sequence of
1903 * operations.
1904 */
1905 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1906 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1907}
1908
Ville Syrjälä951468f2014-09-04 14:55:31 +03001909/*
1910 * Must be paired with edp_panel_vdd_on().
1911 * Must hold pps_mutex around the whole on/off sequence.
1912 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1913 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001914static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001915{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001916 struct drm_i915_private *dev_priv =
1917 intel_dp_to_dev(intel_dp)->dev_private;
1918
1919 lockdep_assert_held(&dev_priv->pps_mutex);
1920
Keith Packard97af61f572011-09-28 16:23:51 -07001921 if (!is_edp(intel_dp))
1922 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001923
Rob Clarke2c719b2014-12-15 13:56:32 -05001924 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001925 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001926
Keith Packardbd943152011-09-18 23:09:52 -07001927 intel_dp->want_panel_vdd = false;
1928
Imre Deakaba86892014-07-30 15:57:31 +03001929 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001930 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001931 else
1932 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001933}
1934
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001935static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001936{
Paulo Zanoni30add222012-10-26 19:05:45 -02001937 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001938 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001939 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001940 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001941
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001942 lockdep_assert_held(&dev_priv->pps_mutex);
1943
Keith Packard97af61f572011-09-28 16:23:51 -07001944 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001945 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001946
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001947 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1948 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001949
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001950 if (WARN(edp_have_panel_power(intel_dp),
1951 "eDP port %c panel power already on\n",
1952 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001953 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001954
Daniel Vetter4be73782014-01-17 14:39:48 +01001955 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001956
Jani Nikulabf13e812013-09-06 07:40:05 +03001957 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001958 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001959 if (IS_GEN5(dev)) {
1960 /* ILK workaround: disable reset around power sequence */
1961 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001962 I915_WRITE(pp_ctrl_reg, pp);
1963 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001964 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001965
Keith Packard1c0ae802011-09-19 13:59:29 -07001966 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001967 if (!IS_GEN5(dev))
1968 pp |= PANEL_POWER_RESET;
1969
Jesse Barnes453c5422013-03-28 09:55:41 -07001970 I915_WRITE(pp_ctrl_reg, pp);
1971 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001972
Daniel Vetter4be73782014-01-17 14:39:48 +01001973 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001974 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001975
Keith Packard05ce1a42011-09-29 16:33:01 -07001976 if (IS_GEN5(dev)) {
1977 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001980 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001981}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001982
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001983void intel_edp_panel_on(struct intel_dp *intel_dp)
1984{
1985 if (!is_edp(intel_dp))
1986 return;
1987
1988 pps_lock(intel_dp);
1989 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001990 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001991}
1992
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001993
1994static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001995{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001996 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1997 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001998 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001999 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02002000 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002001 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002002 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002003
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002004 lockdep_assert_held(&dev_priv->pps_mutex);
2005
Keith Packard97af61f572011-09-28 16:23:51 -07002006 if (!is_edp(intel_dp))
2007 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002008
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002009 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2010 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002011
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002012 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2013 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002014
Jesse Barnes453c5422013-03-28 09:55:41 -07002015 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002016	/* We need to switch off panel power _and_ the VDD override (EDP_FORCE_VDD),
2017	 * otherwise some panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002018 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2019 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002020
Jani Nikulabf13e812013-09-06 07:40:05 +03002021 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002022
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002023 intel_dp->want_panel_vdd = false;
2024
Jesse Barnes453c5422013-03-28 09:55:41 -07002025 I915_WRITE(pp_ctrl_reg, pp);
2026 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002027
Paulo Zanonidce56b32013-12-19 14:29:40 -02002028 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002029 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002030
2031 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002032 power_domain = intel_display_port_power_domain(intel_encoder);
2033 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002034}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002035
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002036void intel_edp_panel_off(struct intel_dp *intel_dp)
2037{
2038 if (!is_edp(intel_dp))
2039 return;
2040
2041 pps_lock(intel_dp);
2042 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002043 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002044}
2045
Jani Nikula1250d102014-08-12 17:11:39 +03002046/* Enable backlight in the panel power control. */
2047static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002048{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002049 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2050 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002051 struct drm_i915_private *dev_priv = dev->dev_private;
2052 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002053 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002054
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002055 /*
2056 * If we enable the backlight right away following a panel power
2057 * on, we may see slight flicker as the panel syncs with the eDP
2058 * link. So delay a bit to make sure the image is solid before
2059 * allowing it to appear.
2060 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002061 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002062
Ville Syrjälä773538e82014-09-04 14:54:56 +03002063 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002064
Jesse Barnes453c5422013-03-28 09:55:41 -07002065 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002066 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002067
Jani Nikulabf13e812013-09-06 07:40:05 +03002068 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002069
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002072
Ville Syrjälä773538e82014-09-04 14:54:56 +03002073 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002074}
2075
Jani Nikula1250d102014-08-12 17:11:39 +03002076/* Enable backlight PWM and backlight PP control. */
2077void intel_edp_backlight_on(struct intel_dp *intel_dp)
2078{
2079 if (!is_edp(intel_dp))
2080 return;
2081
2082 DRM_DEBUG_KMS("\n");
2083
2084 intel_panel_enable_backlight(intel_dp->attached_connector);
2085 _intel_edp_backlight_on(intel_dp);
2086}
2087
2088/* Disable backlight in the panel power control. */
2089static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002090{
Paulo Zanoni30add222012-10-26 19:05:45 -02002091 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002094 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002095
Keith Packardf01eca22011-09-28 16:48:10 -07002096 if (!is_edp(intel_dp))
2097 return;
2098
Ville Syrjälä773538e82014-09-04 14:54:56 +03002099 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002100
Jesse Barnes453c5422013-03-28 09:55:41 -07002101 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002102 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002103
Jani Nikulabf13e812013-09-06 07:40:05 +03002104 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002105
2106 I915_WRITE(pp_ctrl_reg, pp);
2107 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002108
Ville Syrjälä773538e82014-09-04 14:54:56 +03002109 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002110
Paulo Zanonidce56b32013-12-19 14:29:40 -02002111 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002112 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002113}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002114
Jani Nikula1250d102014-08-12 17:11:39 +03002115/* Disable backlight PP control and backlight PWM. */
2116void intel_edp_backlight_off(struct intel_dp *intel_dp)
2117{
2118 if (!is_edp(intel_dp))
2119 return;
2120
2121 DRM_DEBUG_KMS("\n");
2122
2123 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002124 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002125}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002126
Jani Nikula73580fb72014-08-12 17:11:41 +03002127/*
2128 * Hook for controlling the panel power control backlight through the bl_power
2129 * sysfs attribute. Take care to handle multiple calls.
2130 */
2131static void intel_edp_backlight_power(struct intel_connector *connector,
2132 bool enable)
2133{
2134 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002135 bool is_enabled;
2136
Ville Syrjälä773538e82014-09-04 14:54:56 +03002137 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002138 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002139 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002140
2141 if (is_enabled == enable)
2142 return;
2143
Jani Nikula23ba9372014-08-27 14:08:43 +03002144 DRM_DEBUG_KMS("panel power control backlight %s\n",
2145 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002146
2147 if (enable)
2148 _intel_edp_backlight_on(intel_dp);
2149 else
2150 _intel_edp_backlight_off(intel_dp);
2151}
2152
Ville Syrjälä64e10772015-10-29 21:26:01 +02002153static const char *state_string(bool enabled)
2154{
2155 return enabled ? "on" : "off";
2156}
2157
2158static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2159{
2160 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2161 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2162 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2163
2164 I915_STATE_WARN(cur_state != state,
2165 "DP port %c state assertion failure (expected %s, current %s)\n",
2166 port_name(dig_port->port),
2167 state_string(state), state_string(cur_state));
2168}
2169#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2170
2171static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2172{
2173 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2174
2175 I915_STATE_WARN(cur_state != state,
2176 "eDP PLL state assertion failure (expected %s, current %s)\n",
2177 state_string(state), state_string(cur_state));
2178}
2179#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2180#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2181
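/*
 * The asserts below document the expected ordering: the eDP PLL is turned
 * on while the pipe and the DP port are still disabled.
 */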
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002182static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002183{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002184 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002185 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2186 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002187
Ville Syrjälä64e10772015-10-29 21:26:01 +02002188 assert_pipe_disabled(dev_priv, crtc->pipe);
2189 assert_dp_port_disabled(intel_dp);
2190 assert_edp_pll_disabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002191
Ville Syrjäläabfce942015-10-29 21:26:03 +02002192 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2193 crtc->config->port_clock);
2194
2195 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2196
2197 if (crtc->config->port_clock == 162000)
2198 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2199 else
2200 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2201
2202 I915_WRITE(DP_A, intel_dp->DP);
2203 POSTING_READ(DP_A);
2204 udelay(500);
2205
Daniel Vetter07679352012-09-06 22:15:42 +02002206 intel_dp->DP |= DP_PLL_ENABLE;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002207
Daniel Vetter07679352012-09-06 22:15:42 +02002208 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002209 POSTING_READ(DP_A);
2210 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002211}
2212
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002213static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002214{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002215 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002216 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2217 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002218
Ville Syrjälä64e10772015-10-29 21:26:01 +02002219 assert_pipe_disabled(dev_priv, crtc->pipe);
2220 assert_dp_port_disabled(intel_dp);
2221 assert_edp_pll_enabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002222
Ville Syrjäläabfce942015-10-29 21:26:03 +02002223 DRM_DEBUG_KMS("disabling eDP PLL\n");
2224
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002225 intel_dp->DP &= ~DP_PLL_ENABLE;
Daniel Vetter07679352012-09-06 22:15:42 +02002226
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002227 I915_WRITE(DP_A, intel_dp->DP);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002228 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002229 udelay(200);
2230}
2231
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002232/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002233void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002234{
2235 int ret, i;
2236
2237 /* Should have a valid DPCD by this point */
2238 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2239 return;
2240
2241 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002242 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2243 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002244 } else {
2245 /*
2246		 * When turning on, we retry a few times, sleeping 1 ms between
2247		 * attempts, to give the sink time to wake up.
2248 */
2249 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002250 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2251 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002252 if (ret == 1)
2253 break;
2254 msleep(1);
2255 }
2256 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002257
2258 if (ret != 1)
2259 DRM_DEBUG_KMS("failed to %s sink power state\n",
2260 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002261}
2262
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002263static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2264 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002265{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002266 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002267 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002268 struct drm_device *dev = encoder->base.dev;
2269 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002270 enum intel_display_power_domain power_domain;
2271 u32 tmp;
2272
2273 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002274 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002275 return false;
2276
2277 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002278
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002279 if (!(tmp & DP_PORT_EN))
2280 return false;
2281
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002282 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002283 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002284 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002285 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002286
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002287 for_each_pipe(dev_priv, p) {
2288 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2289 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2290 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002291 return true;
2292 }
2293 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002294
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002295 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2296 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002297 } else if (IS_CHERRYVIEW(dev)) {
2298 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2299 } else {
2300 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002301 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002302
2303 return true;
2304}
2305
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002306static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002307 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002308{
2309 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002310 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002311 struct drm_device *dev = encoder->base.dev;
2312 struct drm_i915_private *dev_priv = dev->dev_private;
2313 enum port port = dp_to_dig_port(intel_dp)->port;
2314 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002315 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002316
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002317 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002318
2319 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002320
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002321 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002322 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2323
2324 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002325 flags |= DRM_MODE_FLAG_PHSYNC;
2326 else
2327 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002328
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002329 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002330 flags |= DRM_MODE_FLAG_PVSYNC;
2331 else
2332 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002333 } else {
2334 if (tmp & DP_SYNC_HS_HIGH)
2335 flags |= DRM_MODE_FLAG_PHSYNC;
2336 else
2337 flags |= DRM_MODE_FLAG_NHSYNC;
2338
2339 if (tmp & DP_SYNC_VS_HIGH)
2340 flags |= DRM_MODE_FLAG_PVSYNC;
2341 else
2342 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002343 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002344
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002345 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002346
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002347 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2348 tmp & DP_COLOR_RANGE_16_235)
2349 pipe_config->limited_color_range = true;
2350
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002351 pipe_config->has_dp_encoder = true;
2352
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002353 pipe_config->lane_count =
2354 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2355
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002356 intel_dp_get_m_n(crtc, pipe_config);
2357
Ville Syrjälä18442d02013-09-13 16:00:08 +03002358 if (port == PORT_A) {
Ville Syrjäläb377e0d2015-10-29 21:25:59 +02002359 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002360 pipe_config->port_clock = 162000;
2361 else
2362 pipe_config->port_clock = 270000;
2363 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002364
2365 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2366 &pipe_config->dp_m_n);
2367
2368 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2369 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2370
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002371 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002372
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002373 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2374 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2375 /*
2376 * This is a big fat ugly hack.
2377 *
2378	 * Some machines in UEFI boot mode provide us with a VBT that has 18
2379 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2380 * unknown we fail to light up. Yet the same BIOS boots up with
2381 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2382 * max, not what it tells us to use.
2383 *
2384 * Note: This will still be broken if the eDP panel is not lit
2385 * up by the BIOS, and thus we can't get the mode at module
2386 * load.
2387 */
2388 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2389 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2390 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2391 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002392}
2393
Daniel Vettere8cb4552012-07-01 13:05:48 +02002394static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002395{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002396 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002397 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002398 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2399
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002400 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002401 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002402
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002403 if (HAS_PSR(dev) && !HAS_DDI(dev))
2404 intel_psr_disable(intel_dp);
2405
Daniel Vetter6cb49832012-05-20 17:14:50 +02002406 /* Make sure the panel is off before trying to change the mode. But also
2407 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002408 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002409 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002410 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002411 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002412
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002413 /* disable the port before the pipe on g4x */
2414 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002415 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002416}
2417
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002418static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002419{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002420 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002421 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002422
Ville Syrjälä49277c32014-03-31 18:21:26 +03002423 intel_dp_link_down(intel_dp);
Ville Syrjäläabfce942015-10-29 21:26:03 +02002424
2425 /* Only ilk+ has port A */
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002426 if (port == PORT_A)
2427 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002428}
2429
2430static void vlv_post_disable_dp(struct intel_encoder *encoder)
2431{
2432 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2433
2434 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002435}
2436
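/*
 * Assert (reset == true) or deassert the CHV data lane soft reset via the
 * PCS DW0/DW1 registers; the second lane pair is only touched when more
 * than two lanes are in use. Callers hold sb_lock around this.
 */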
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002437static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2438 bool reset)
2439{
2440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2441 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2442 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2443 enum pipe pipe = crtc->pipe;
2444 uint32_t val;
2445
2446 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2447 if (reset)
2448 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2449 else
2450 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2451 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2452
2453 if (crtc->config->lane_count > 2) {
2454 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2455 if (reset)
2456 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2457 else
2458 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2459 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2460 }
2461
2462 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2463 val |= CHV_PCS_REQ_SOFTRESET_EN;
2464 if (reset)
2465 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2466 else
2467 val |= DPIO_PCS_CLK_SOFT_RESET;
2468 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2469
2470 if (crtc->config->lane_count > 2) {
2471 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2472 val |= CHV_PCS_REQ_SOFTRESET_EN;
2473 if (reset)
2474 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2475 else
2476 val |= DPIO_PCS_CLK_SOFT_RESET;
2477 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2478 }
2479}
2480
Ville Syrjälä580d3812014-04-09 13:29:00 +03002481static void chv_post_disable_dp(struct intel_encoder *encoder)
2482{
2483 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002484 struct drm_device *dev = encoder->base.dev;
2485 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002486
2487 intel_dp_link_down(intel_dp);
2488
Ville Syrjäläa5805162015-05-26 20:42:30 +03002489 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002490
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002491 /* Assert data lane reset */
2492 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002493
Ville Syrjäläa5805162015-05-26 20:42:30 +03002494 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002495}
2496
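/*
 * Translate a DPCD training pattern into source-side register bits:
 * DP_TP_CTL on DDI platforms, the CPT-style bits for PCH ports and
 * gen7 port A, and the g4x/VLV/CHV bits otherwise. Except for the DDI
 * case only *DP is updated; the caller writes the port register.
 */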
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002497static void
2498_intel_dp_set_link_train(struct intel_dp *intel_dp,
2499 uint32_t *DP,
2500 uint8_t dp_train_pat)
2501{
2502 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2503 struct drm_device *dev = intel_dig_port->base.base.dev;
2504 struct drm_i915_private *dev_priv = dev->dev_private;
2505 enum port port = intel_dig_port->port;
2506
2507 if (HAS_DDI(dev)) {
2508 uint32_t temp = I915_READ(DP_TP_CTL(port));
2509
2510 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2511 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2512 else
2513 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2514
2515 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2516 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2517 case DP_TRAINING_PATTERN_DISABLE:
2518 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2519
2520 break;
2521 case DP_TRAINING_PATTERN_1:
2522 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2523 break;
2524 case DP_TRAINING_PATTERN_2:
2525 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2526 break;
2527 case DP_TRAINING_PATTERN_3:
2528 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2529 break;
2530 }
2531 I915_WRITE(DP_TP_CTL(port), temp);
2532
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002533 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2534 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002535 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2536
2537 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2538 case DP_TRAINING_PATTERN_DISABLE:
2539 *DP |= DP_LINK_TRAIN_OFF_CPT;
2540 break;
2541 case DP_TRAINING_PATTERN_1:
2542 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2543 break;
2544 case DP_TRAINING_PATTERN_2:
2545 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2546 break;
2547 case DP_TRAINING_PATTERN_3:
2548 DRM_ERROR("DP training pattern 3 not supported\n");
2549 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2550 break;
2551 }
2552
2553 } else {
2554 if (IS_CHERRYVIEW(dev))
2555 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2556 else
2557 *DP &= ~DP_LINK_TRAIN_MASK;
2558
2559 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2560 case DP_TRAINING_PATTERN_DISABLE:
2561 *DP |= DP_LINK_TRAIN_OFF;
2562 break;
2563 case DP_TRAINING_PATTERN_1:
2564 *DP |= DP_LINK_TRAIN_PAT_1;
2565 break;
2566 case DP_TRAINING_PATTERN_2:
2567 *DP |= DP_LINK_TRAIN_PAT_2;
2568 break;
2569 case DP_TRAINING_PATTERN_3:
2570 if (IS_CHERRYVIEW(dev)) {
2571 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2572 } else {
2573 DRM_ERROR("DP training pattern 3 not supported\n");
2574 *DP |= DP_LINK_TRAIN_PAT_2;
2575 }
2576 break;
2577 }
2578 }
2579}
2580
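/*
 * Write the port register twice: first with training pattern 1 while the
 * port is still disabled, then again with DP_PORT_EN (and audio enable)
 * set, as required when a VLV/CHV power sequencer was freshly assigned.
 */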
2581static void intel_dp_enable_port(struct intel_dp *intel_dp)
2582{
2583 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2584 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002585 struct intel_crtc *crtc =
2586 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002587
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002588 /* enable with pattern 1 (as per spec) */
2589 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2590 DP_TRAINING_PATTERN_1);
2591
2592 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2593 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002594
2595 /*
2596 * Magic for VLV/CHV. We _must_ first set up the register
2597 * without actually enabling the port, and then do another
2598 * write to enable the port. Otherwise link training will
2599 * fail when the power sequencer is freshly used for this port.
2600 */
2601 intel_dp->DP |= DP_PORT_EN;
Ville Syrjälä6fec7662015-11-10 16:16:17 +02002602 if (crtc->config->has_audio)
2603 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002604
2605 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2606 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002607}
2608
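/*
 * Full enable sequence for the port: power sequencer binding (VLV/CHV),
 * port enable, underrun reporting restore (gen5 port A), panel power-up,
 * PHY ready wait (VLV/CHV), sink wake-up, link training and audio enable.
 */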
Daniel Vettere8cb4552012-07-01 13:05:48 +02002609static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002610{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002611 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2612 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002613 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002614 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002615 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002616 enum port port = dp_to_dig_port(intel_dp)->port;
2617 enum pipe pipe = crtc->pipe;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002618
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002619 if (WARN_ON(dp_reg & DP_PORT_EN))
2620 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002621
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002622 pps_lock(intel_dp);
2623
2624 if (IS_VALLEYVIEW(dev))
2625 vlv_init_panel_power_sequencer(intel_dp);
2626
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002627 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002628
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002629 if (port == PORT_A && IS_GEN5(dev_priv)) {
2630 /*
2631 * Underrun reporting for the other pipe was disabled in
2632 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2633 * enabled, so it's now safe to re-enable underrun reporting.
2634 */
2635 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2636 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2637 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2638 }
2639
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002640 edp_panel_vdd_on(intel_dp);
2641 edp_panel_on(intel_dp);
2642 edp_panel_vdd_off(intel_dp, true);
2643
2644 pps_unlock(intel_dp);
2645
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002646 if (IS_VALLEYVIEW(dev)) {
2647 unsigned int lane_mask = 0x0;
2648
2649 if (IS_CHERRYVIEW(dev))
2650 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2651
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002652 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2653 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002654 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002655
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002656 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2657 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002658 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002659
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002660 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002661 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002662 pipe_name(pipe));
Jani Nikulac1dec792014-10-27 16:26:56 +02002663 intel_audio_codec_enable(encoder);
2664 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002665}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002666
Jani Nikulaecff4f32013-09-06 07:38:29 +03002667static void g4x_enable_dp(struct intel_encoder *encoder)
2668{
Jani Nikula828f5c62013-09-05 16:44:45 +03002669 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2670
Jani Nikulaecff4f32013-09-06 07:38:29 +03002671 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002672 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002673}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002674
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002675static void vlv_enable_dp(struct intel_encoder *encoder)
2676{
Jani Nikula828f5c62013-09-05 16:44:45 +03002677 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2678
Daniel Vetter4be73782014-01-17 14:39:48 +01002679 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002680 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002681}
2682
Jani Nikulaecff4f32013-09-06 07:38:29 +03002683static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002684{
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002685 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002686 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002687 enum port port = dp_to_dig_port(intel_dp)->port;
2688 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002689
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002690 intel_dp_prepare(encoder);
2691
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002692 if (port == PORT_A && IS_GEN5(dev_priv)) {
2693 /*
2694 * We get FIFO underruns on the other pipe when
2695	 * enabling the CPU eDP PLL, and when enabling the CPU
2696 * eDP port. We could potentially avoid the PLL
2697 * underrun with a vblank wait just prior to enabling
2698 * the PLL, but that doesn't appear to help the port
2699 * enable case. Just sweep it all under the rug.
2700 */
2701 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2702 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2703 }
2704
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002705 /* Only ilk+ has port A */
Ville Syrjäläabfce942015-10-29 21:26:03 +02002706 if (port == PORT_A)
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002707 ironlake_edp_pll_on(intel_dp);
2708}
2709
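/*
 * Logically disconnect this eDP port from the power sequencer it last
 * used: sync vdd off and clear the PPS port select (see the comment
 * below for why the port select must be cleared).
 */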
Ville Syrjälä83b84592014-10-16 21:29:51 +03002710static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2711{
2712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2713 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2714 enum pipe pipe = intel_dp->pps_pipe;
2715 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2716
2717 edp_panel_vdd_off_sync(intel_dp);
2718
2719 /*
2720	 * VLV seems to get confused when multiple power sequencers
2721	 * have the same port selected (even if only one has power/vdd
2722	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2723	 * CHV, on the other hand, doesn't seem to mind having the same port
2724	 * selected in multiple power sequencers, but let's always clear the
2725	 * port select when logically disconnecting a power sequencer
2726	 * from a port.
2727 */
2728 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2729 pipe_name(pipe), port_name(intel_dig_port->port));
2730 I915_WRITE(pp_on_reg, 0);
2731 POSTING_READ(pp_on_reg);
2732
2733 intel_dp->pps_pipe = INVALID_PIPE;
2734}
2735
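/*
 * Detach every eDP port that currently claims the given pipe's power
 * sequencer so it can be reassigned. Called with pps_mutex held; only
 * pipes A and B have a power sequencer on VLV/CHV.
 */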
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002736static void vlv_steal_power_sequencer(struct drm_device *dev,
2737 enum pipe pipe)
2738{
2739 struct drm_i915_private *dev_priv = dev->dev_private;
2740 struct intel_encoder *encoder;
2741
2742 lockdep_assert_held(&dev_priv->pps_mutex);
2743
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002744 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2745 return;
2746
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002747 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2748 base.head) {
2749 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002750 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002751
2752 if (encoder->type != INTEL_OUTPUT_EDP)
2753 continue;
2754
2755 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002756 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002757
2758 if (intel_dp->pps_pipe != pipe)
2759 continue;
2760
2761 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002762 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002763
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002764 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002765 "stealing pipe %c power sequencer from active eDP port %c\n",
2766 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002767
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002768 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002769 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002770 }
2771}
2772
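/*
 * Bind the current pipe's power sequencer to this eDP port: detach any
 * sequencer the port used before, steal the new pipe's sequencer from
 * other ports if necessary, then reinitialize the PPS registers.
 */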
2773static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2774{
2775 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2776 struct intel_encoder *encoder = &intel_dig_port->base;
2777 struct drm_device *dev = encoder->base.dev;
2778 struct drm_i915_private *dev_priv = dev->dev_private;
2779 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002780
2781 lockdep_assert_held(&dev_priv->pps_mutex);
2782
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002783 if (!is_edp(intel_dp))
2784 return;
2785
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002786 if (intel_dp->pps_pipe == crtc->pipe)
2787 return;
2788
2789 /*
2790 * If another power sequencer was being used on this
2791	 * port previously, make sure to turn off vdd there while
2792 * we still have control of it.
2793 */
2794 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002795 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002796
2797 /*
2798 * We may be stealing the power
2799 * sequencer from another port.
2800 */
2801 vlv_steal_power_sequencer(dev, crtc->pipe);
2802
2803 /* now it's all ours */
2804 intel_dp->pps_pipe = crtc->pipe;
2805
2806 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2807 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2808
2809 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002810 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2811 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002812}
2813
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002814static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2815{
2816 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2817 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002818 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002819 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002820 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002821 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002822 int pipe = intel_crtc->pipe;
2823 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002824
Ville Syrjäläa5805162015-05-26 20:42:30 +03002825 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002826
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002827 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002828 val = 0;
2829 if (pipe)
2830 val |= (1<<21);
2831 else
2832 val &= ~(1<<21);
2833 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2835 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2836 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002837
Ville Syrjäläa5805162015-05-26 20:42:30 +03002838 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002839
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002840 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002841}
2842
Jani Nikulaecff4f32013-09-06 07:38:29 +03002843static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002844{
2845 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2846 struct drm_device *dev = encoder->base.dev;
2847 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002848 struct intel_crtc *intel_crtc =
2849 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002850 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002851 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002852
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002853 intel_dp_prepare(encoder);
2854
Jesse Barnes89b667f2013-04-18 14:51:36 -07002855 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002856 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002857 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002858 DPIO_PCS_TX_LANE2_RESET |
2859 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002860 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002861 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2862 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2863 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2864 DPIO_PCS_CLK_SOFT_RESET);
2865
2866 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002867 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2868 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2869 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002870 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002871}
2872
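/*
 * CHV PHY programming just before port enable: let the hardware manage
 * the TX FIFO reset source, set per-lane latency (upar) bits, program
 * data lane staggering from the port clock, then release the data lane
 * soft reset and enable the port.
 */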
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002873static void chv_pre_enable_dp(struct intel_encoder *encoder)
2874{
2875 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2876 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2877 struct drm_device *dev = encoder->base.dev;
2878 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002879 struct intel_crtc *intel_crtc =
2880 to_intel_crtc(encoder->base.crtc);
2881 enum dpio_channel ch = vlv_dport_to_channel(dport);
2882 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002883 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002884 u32 val;
2885
Ville Syrjäläa5805162015-05-26 20:42:30 +03002886 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002887
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002888 /* allow hardware to manage TX FIFO reset source */
2889 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2890 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2891 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2892
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002893 if (intel_crtc->config->lane_count > 2) {
2894 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2895 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2896 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2897 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002898
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002899	/* Program Tx lane latency optimal setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002900 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002901 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002902 if (intel_crtc->config->lane_count == 1)
2903 data = 0x0;
2904 else
2905 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002906 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2907 data << DPIO_UPAR_SHIFT);
2908 }
2909
2910 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002911 if (intel_crtc->config->port_clock > 270000)
2912 stagger = 0x18;
2913 else if (intel_crtc->config->port_clock > 135000)
2914 stagger = 0xd;
2915 else if (intel_crtc->config->port_clock > 67500)
2916 stagger = 0x7;
2917 else if (intel_crtc->config->port_clock > 33750)
2918 stagger = 0x4;
2919 else
2920 stagger = 0x2;
2921
2922 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2923 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2924 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2925
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002926 if (intel_crtc->config->lane_count > 2) {
2927 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2928 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2930 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002931
2932 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2933 DPIO_LANESTAGGER_STRAP(stagger) |
2934 DPIO_LANESTAGGER_STRAP_OVRD |
2935 DPIO_TX1_STAGGER_MASK(0x1f) |
2936 DPIO_TX1_STAGGER_MULT(6) |
2937 DPIO_TX2_STAGGER_MULT(0));
2938
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002939 if (intel_crtc->config->lane_count > 2) {
2940 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2941 DPIO_LANESTAGGER_STRAP(stagger) |
2942 DPIO_LANESTAGGER_STRAP_OVRD |
2943 DPIO_TX1_STAGGER_MASK(0x1f) |
2944 DPIO_TX1_STAGGER_MULT(7) |
2945 DPIO_TX2_STAGGER_MULT(5));
2946 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002947
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002948 /* Deassert data lane reset */
2949 chv_data_lane_soft_reset(encoder, false);
2950
Ville Syrjäläa5805162015-05-26 20:42:30 +03002951 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002952
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002953 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002954
2955 /* Second common lane will stay alive on its own now */
2956 if (dport->release_cl2_override) {
2957 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2958 dport->release_cl2_override = false;
2959 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002960}
2961
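/*
 * CHV PHY programming before the PLL is enabled: power up the common
 * lanes, assert the data lane soft reset, and set up left/right clock
 * distribution and clock channel usage for this pipe/channel pair.
 */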
Ville Syrjälä9197c882014-04-09 13:29:05 +03002962static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2963{
2964 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2965 struct drm_device *dev = encoder->base.dev;
2966 struct drm_i915_private *dev_priv = dev->dev_private;
2967 struct intel_crtc *intel_crtc =
2968 to_intel_crtc(encoder->base.crtc);
2969 enum dpio_channel ch = vlv_dport_to_channel(dport);
2970 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002971 unsigned int lane_mask =
2972 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002973 u32 val;
2974
Ville Syrjälä625695f2014-06-28 02:04:02 +03002975 intel_dp_prepare(encoder);
2976
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002977 /*
2978 * Must trick the second common lane into life.
2979 * Otherwise we can't even access the PLL.
2980 */
2981 if (ch == DPIO_CH0 && pipe == PIPE_B)
2982 dport->release_cl2_override =
2983 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2984
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002985 chv_phy_powergate_lanes(encoder, true, lane_mask);
2986
Ville Syrjäläa5805162015-05-26 20:42:30 +03002987 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002988
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002989 /* Assert data lane reset */
2990 chv_data_lane_soft_reset(encoder, true);
2991
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002992 /* program left/right clock distribution */
2993 if (pipe != PIPE_B) {
2994 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2995 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2996 if (ch == DPIO_CH0)
2997 val |= CHV_BUFLEFTENA1_FORCE;
2998 if (ch == DPIO_CH1)
2999 val |= CHV_BUFRIGHTENA1_FORCE;
3000 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3001 } else {
3002 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3003 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3004 if (ch == DPIO_CH0)
3005 val |= CHV_BUFLEFTENA2_FORCE;
3006 if (ch == DPIO_CH1)
3007 val |= CHV_BUFRIGHTENA2_FORCE;
3008 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3009 }
3010
Ville Syrjälä9197c882014-04-09 13:29:05 +03003011 /* program clock channel usage */
3012 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3013 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3014 if (pipe != PIPE_B)
3015 val &= ~CHV_PCS_USEDCLKCHANNEL;
3016 else
3017 val |= CHV_PCS_USEDCLKCHANNEL;
3018 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3019
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003020 if (intel_crtc->config->lane_count > 2) {
3021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3022 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3023 if (pipe != PIPE_B)
3024 val &= ~CHV_PCS_USEDCLKCHANNEL;
3025 else
3026 val |= CHV_PCS_USEDCLKCHANNEL;
3027 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3028 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03003029
3030 /*
3031	 * This is a bit weird since generally CL
3032 * matches the pipe, but here we need to
3033 * pick the CL based on the port.
3034 */
3035 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3036 if (pipe != PIPE_B)
3037 val &= ~CHV_CMN_USEDCLKCHANNEL;
3038 else
3039 val |= CHV_CMN_USEDCLKCHANNEL;
3040 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3041
Ville Syrjäläa5805162015-05-26 20:42:30 +03003042 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003043}
3044
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003045static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3046{
3047 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3048 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3049 u32 val;
3050
3051 mutex_lock(&dev_priv->sb_lock);
3052
3053 /* disable left/right clock distribution */
3054 if (pipe != PIPE_B) {
3055 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3056 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3057 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3058 } else {
3059 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3060 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3061 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3062 }
3063
3064 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003065
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003066 /*
3067 * Leave the power down bit cleared for at least one
3067	 * lane so that chv_phy_powergate_ch() will power
3068	 * on something when the channel is otherwise unused.
3069	 * When the port is off and the override is removed
3070	 * the lanes power down anyway, so it doesn't
3071	 * really matter what the state of the power down bits is
3073 * after this.
3074 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003075 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003076}
3077
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003078/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003079 * Native read with retry for link status and receiver capability reads for
3080 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003081 *
3082 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3083 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003084 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003085static ssize_t
3086intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3087 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003088{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003089 ssize_t ret;
3090 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003091
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003092 /*
3093	 * Sometimes we just get the same incorrect byte repeated
3094	 * over the entire buffer. Doing just one throwaway read
3095 * initially seems to "solve" it.
3096 */
3097 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3098
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003099 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003100 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3101 if (ret == size)
3102 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003103 msleep(1);
3104 }
3105
Jani Nikula9d1a1032014-03-14 16:51:15 +02003106 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003107}
3108
3109/*
3110 * Fetch AUX CH registers 0x202 - 0x207 which contain
3111 * link status information
3112 */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003113bool
Keith Packard93f62da2011-11-01 19:45:03 -07003114intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003115{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003116 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3117 DP_LANE0_1_STATUS,
3118 link_status,
3119 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003120}
3121
Paulo Zanoni11002442014-06-13 18:45:41 -03003122/* These are source-specific values. */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003123uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003124intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003125{
Paulo Zanoni30add222012-10-26 19:05:45 -02003126 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303127 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003128 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003129
Vandana Kannan93147262014-11-18 15:45:29 +05303130 if (IS_BROXTON(dev))
3131 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3132 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303133 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303134 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003135 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303136 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303137 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003138 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303139 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003140 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303141 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003142 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303143 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003144}
3145
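/*
 * Highest pre-emphasis level the source supports for the given voltage
 * swing; like the voltage swing limit this is platform-specific.
 */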
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003146uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003147intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3148{
Paulo Zanoni30add222012-10-26 19:05:45 -02003149 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003150 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003151
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003152 if (INTEL_INFO(dev)->gen >= 9) {
3153 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3155 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3156 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3161 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003162 default:
3163 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3164 }
3165 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003166 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3168 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3169 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3170 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3172 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003174 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303175 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003176 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003177 } else if (IS_VALLEYVIEW(dev)) {
3178 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3180 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3181 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3182 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3183 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3184 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3185 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003186 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303187 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003188 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003189 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003190 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303191 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3192 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3195 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003196 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303197 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003198 }
3199 } else {
3200 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303201 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3202 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3203 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3204 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3205 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3206 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3207 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003208 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303209 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003210 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003211 }
3212}
3213
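/*
 * Program the VLV DPIO PHY (demph, pre-emphasis and unique transcale
 * values) for the vswing/pre-emphasis combination in train_set[0].
 * Always returns 0; the levels are programmed directly in the PHY
 * rather than via the port register.
 */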
Daniel Vetter5829975c2015-04-16 11:36:52 +02003214static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003215{
3216 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3217 struct drm_i915_private *dev_priv = dev->dev_private;
3218 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003219 struct intel_crtc *intel_crtc =
3220 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003221 unsigned long demph_reg_value, preemph_reg_value,
3222 uniqtranscale_reg_value;
3223 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003224 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003225 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003226
3227 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303228 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003229 preemph_reg_value = 0x0004000;
3230 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003232 demph_reg_value = 0x2B405555;
3233 uniqtranscale_reg_value = 0x552AB83A;
3234 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303235 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003236 demph_reg_value = 0x2B404040;
3237 uniqtranscale_reg_value = 0x5548B83A;
3238 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003240 demph_reg_value = 0x2B245555;
3241 uniqtranscale_reg_value = 0x5560B83A;
3242 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003244 demph_reg_value = 0x2B405555;
3245 uniqtranscale_reg_value = 0x5598DA3A;
3246 break;
3247 default:
3248 return 0;
3249 }
3250 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303251 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003252 preemph_reg_value = 0x0002000;
3253 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003255 demph_reg_value = 0x2B404040;
3256 uniqtranscale_reg_value = 0x5552B83A;
3257 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003259 demph_reg_value = 0x2B404848;
3260 uniqtranscale_reg_value = 0x5580B83A;
3261 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003263 demph_reg_value = 0x2B404040;
3264 uniqtranscale_reg_value = 0x55ADDA3A;
3265 break;
3266 default:
3267 return 0;
3268 }
3269 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303270 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003271 preemph_reg_value = 0x0000000;
3272 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003274 demph_reg_value = 0x2B305555;
3275 uniqtranscale_reg_value = 0x5570B83A;
3276 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003278 demph_reg_value = 0x2B2B4040;
3279 uniqtranscale_reg_value = 0x55ADDA3A;
3280 break;
3281 default:
3282 return 0;
3283 }
3284 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303285 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003286 preemph_reg_value = 0x0006000;
3287 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303288 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003289 demph_reg_value = 0x1B405555;
3290 uniqtranscale_reg_value = 0x55ADDA3A;
3291 break;
3292 default:
3293 return 0;
3294 }
3295 break;
3296 default:
3297 return 0;
3298 }
3299
Ville Syrjäläa5805162015-05-26 20:42:30 +03003300 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003301 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3302 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3303 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003304 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003305 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3306 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3307 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3308 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003309 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003310
3311 return 0;
3312}
3313
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003314static bool chv_need_uniq_trans_scale(uint8_t train_set)
3315{
3316 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3317 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3318}
3319
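/*
 * CHV counterpart of vlv_signal_levels(): program per-lane swing margin
 * and deemph through the DPIO PHY, apply the unique transition scale
 * workaround, and kick off the swing calculation. Always returns 0.
 */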
Daniel Vetter5829975c2015-04-16 11:36:52 +02003320static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003321{
3322 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3323 struct drm_i915_private *dev_priv = dev->dev_private;
3324 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3325 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003326 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003327 uint8_t train_set = intel_dp->train_set[0];
3328 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003329 enum pipe pipe = intel_crtc->pipe;
3330 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003331
3332 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303333 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003334 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003336 deemph_reg_value = 128;
3337 margin_reg_value = 52;
3338 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003340 deemph_reg_value = 128;
3341 margin_reg_value = 77;
3342 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003344 deemph_reg_value = 128;
3345 margin_reg_value = 102;
3346 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003348 deemph_reg_value = 128;
3349 margin_reg_value = 154;
3350 /* FIXME extra to set for 1200 */
3351 break;
3352 default:
3353 return 0;
3354 }
3355 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303356 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003357 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303358 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003359 deemph_reg_value = 85;
3360 margin_reg_value = 78;
3361 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003363 deemph_reg_value = 85;
3364 margin_reg_value = 116;
3365 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003367 deemph_reg_value = 85;
3368 margin_reg_value = 154;
3369 break;
3370 default:
3371 return 0;
3372 }
3373 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303374 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003375 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003377 deemph_reg_value = 64;
3378 margin_reg_value = 104;
3379 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003381 deemph_reg_value = 64;
3382 margin_reg_value = 154;
3383 break;
3384 default:
3385 return 0;
3386 }
3387 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303388 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003389 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003391 deemph_reg_value = 43;
3392 margin_reg_value = 154;
3393 break;
3394 default:
3395 return 0;
3396 }
3397 break;
3398 default:
3399 return 0;
3400 }
3401
Ville Syrjäläa5805162015-05-26 20:42:30 +03003402 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003403
3404 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003405 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3406 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003407 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3408 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003409 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3410
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003411 if (intel_crtc->config->lane_count > 2) {
3412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3413 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3414 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3415 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3417 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003418
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3420 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3421 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3422 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3423
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003424 if (intel_crtc->config->lane_count > 2) {
3425 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3426 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3427 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3428 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3429 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003430
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003431 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003432 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003433 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3434 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3435 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3436 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3437 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003438
3439 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003440 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003441 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003442
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003443 val &= ~DPIO_SWING_MARGIN000_MASK;
3444 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003445
3446 /*
3447 * Supposedly this value shouldn't matter when unique transition
3448 * scale is disabled, but in fact it does matter. Let's just
3449 * always program the same value and hope it's OK.
3450 */
3451 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3452 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3453
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003454 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3455 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003456
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003457 /*
3458 * The document said it needs to set bit 27 for ch0 and bit 26
3459 * for ch1. Might be a typo in the doc.
3460 * For now, for this unique transition scale selection, set bit
3461 * 27 for ch0 and ch1.
3462 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003463 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003464 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003465 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003466 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003467 else
3468 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3469 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003470 }
3471
3472 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003473 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3474 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3475 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3476
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003477 if (intel_crtc->config->lane_count > 2) {
3478 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3479 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3480 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3481 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003482
Ville Syrjäläa5805162015-05-26 20:42:30 +03003483 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003484
3485 return 0;
3486}
3487
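/*
 * The gen4/gen6/gen7 helpers below translate the DPCD training values
 * negotiated with the sink (voltage swing and pre-emphasis packed into
 * train_set) into platform specific DP register bits. As a worked
 * example of the gen4 mapping right below:
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1 turns into
 * DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5.
 */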
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003488static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003489gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003490{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003491 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003492
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003493 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003495 default:
3496 signal_levels |= DP_VOLTAGE_0_4;
3497 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003499 signal_levels |= DP_VOLTAGE_0_6;
3500 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003502 signal_levels |= DP_VOLTAGE_0_8;
3503 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303504 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003505 signal_levels |= DP_VOLTAGE_1_2;
3506 break;
3507 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003508 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303509 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003510 default:
3511 signal_levels |= DP_PRE_EMPHASIS_0;
3512 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303513 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003514 signal_levels |= DP_PRE_EMPHASIS_3_5;
3515 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303516 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003517 signal_levels |= DP_PRE_EMPHASIS_6;
3518 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303519 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003520 signal_levels |= DP_PRE_EMPHASIS_9_5;
3521 break;
3522 }
3523 return signal_levels;
3524}
3525
Zhenyu Wange3421a12010-04-08 09:43:27 +08003526/* Gen6's DP voltage swing and pre-emphasis control */
3527static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003528gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003529{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003530 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3531 DP_TRAIN_PRE_EMPHASIS_MASK);
3532 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303533 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003535 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003537 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3539 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003540 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003543 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303544 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3545 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003546 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003547 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003548		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3549 "0x%x\n", signal_levels);
3550 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003551 }
3552}
3553
Keith Packard1a2eb462011-11-16 16:26:07 -08003554/* Gen7's DP voltage swing and pre-emphasis control */
3555static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003556gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003557{
3558 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3559 DP_TRAIN_PRE_EMPHASIS_MASK);
3560 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303561 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003562 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303563 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003564 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303565 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003566 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3567
Sonika Jindalbd600182014-08-08 16:23:41 +05303568 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003569 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303570 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003571 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3572
Sonika Jindalbd600182014-08-08 16:23:41 +05303573 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003574 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303575 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003576 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3577
3578 default:
3579		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3580 "0x%x\n", signal_levels);
3581 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3582 }
3583}
3584
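/*
 * Select the signal level encoding for the current platform and write it
 * out: DDI platforms go through ddi_signal_levels() (Broxton keeps no
 * extra bits in the DP register itself), CHV/VLV program the levels via
 * the PHY sideband, and gen4/6/7 OR the returned bits into intel_dp->DP
 * under the platform's voltage/pre-emphasis mask before it is written to
 * output_reg.
 */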
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003585void
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003586intel_dp_set_signal_levels(struct intel_dp *intel_dp)
Paulo Zanonif0a34242012-12-06 16:51:50 -02003587{
3588 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003589 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003590 struct drm_device *dev = intel_dig_port->base.base.dev;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003591 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehallf8896f52015-06-25 11:11:03 +03003592 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003593 uint8_t train_set = intel_dp->train_set[0];
3594
David Weinehallf8896f52015-06-25 11:11:03 +03003595 if (HAS_DDI(dev)) {
3596 signal_levels = ddi_signal_levels(intel_dp);
3597
3598 if (IS_BROXTON(dev))
3599 signal_levels = 0;
3600 else
3601 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003602 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003603 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003604 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003605 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003606 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003607 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003608 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003609 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003610 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003611 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3612 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003613 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003614 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3615 }
3616
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303617 if (mask)
3618 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3619
3620 DRM_DEBUG_KMS("Using vswing level %d\n",
3621 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3622 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3623 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3624 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003625
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003626 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003627
3628 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3629 POSTING_READ(intel_dp->output_reg);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003630}
3631
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003632void
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003633intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3634 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003635{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003636 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003637 struct drm_i915_private *dev_priv =
3638 to_i915(intel_dig_port->base.base.dev);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003639
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003640 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003641
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003642 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003643 POSTING_READ(intel_dp->output_reg);
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003644}
3645
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003646void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
Imre Deak3ab9c632013-05-03 12:57:41 +03003647{
3648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3649 struct drm_device *dev = intel_dig_port->base.base.dev;
3650 struct drm_i915_private *dev_priv = dev->dev_private;
3651 enum port port = intel_dig_port->port;
3652 uint32_t val;
3653
3654 if (!HAS_DDI(dev))
3655 return;
3656
3657 val = I915_READ(DP_TP_CTL(port));
3658 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3659 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3660 I915_WRITE(DP_TP_CTL(port), val);
3661
3662 /*
3663	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3664	 * we need to set idle transmission mode is to work around a HW issue
3665	 * where we enable the pipe while not in idle link-training mode.
3666	 * In this case there is a requirement to wait for a minimum number of
3667 * idle patterns to be sent.
3668 */
3669 if (port == PORT_A)
3670 return;
3671
3672 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3673 1))
3674 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3675}
3676
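/*
 * Tear the link down: drop to the idle training pattern, clear the port
 * and audio enable bits, apply the IBX transcoder A workaround where
 * needed, and wait out the panel power down delay before returning.
 */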
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003677static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003678intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003679{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003681 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003682 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003683 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003684 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003685 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003686
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003687 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003688 return;
3689
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003690 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003691 return;
3692
Zhao Yakui28c97732009-10-09 11:39:41 +08003693 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003694
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003695 if ((IS_GEN7(dev) && port == PORT_A) ||
3696 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003697 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003698 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003699 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003700 if (IS_CHERRYVIEW(dev))
3701 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3702 else
3703 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003704 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003705 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003706 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003707 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003708
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003709 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3710 I915_WRITE(intel_dp->output_reg, DP);
3711 POSTING_READ(intel_dp->output_reg);
3712
3713 /*
3714 * HW workaround for IBX, we need to move the port
3715 * to transcoder A after disabling it to allow the
3716 * matching HDMI port to be enabled on transcoder A.
3717 */
3718 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003719 /*
3720 * We get CPU/PCH FIFO underruns on the other pipe when
3721 * doing the workaround. Sweep them under the rug.
3722 */
3723 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3724 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3725
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003726 /* always enable with pattern 1 (as per spec) */
3727 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3728 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3729 I915_WRITE(intel_dp->output_reg, DP);
3730 POSTING_READ(intel_dp->output_reg);
3731
3732 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003733 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003734 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003735
3736 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3737 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3738 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
Eric Anholt5bddd172010-11-18 09:32:59 +08003739 }
3740
Keith Packardf01eca22011-09-28 16:48:10 -07003741 msleep(intel_dp->panel_power_down_delay);
Ville Syrjälä6fec7662015-11-10 16:16:17 +02003742
3743 intel_dp->DP = DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003744}
3745
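/*
 * Read and cache the sink's DPCD receiver capability block. This also
 * picks up the PSR/PSR2 capabilities, the eDP 1.4 supported link rate
 * table and the downstream port info, and returns false if the DPCD
 * can't be read or reports revision 0 (DPCD not present).
 */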
Keith Packard26d61aa2011-07-25 20:01:09 -07003746static bool
3747intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003748{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003749 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3750 struct drm_device *dev = dig_port->base.base.dev;
3751 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303752 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003753
Jani Nikula9d1a1032014-03-14 16:51:15 +02003754 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3755 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003756 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003757
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003758 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003759
Adam Jacksonedb39242012-09-18 10:58:49 -04003760 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3761 return false; /* DPCD not present */
3762
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003763 /* Check if the panel supports PSR */
3764 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003765 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003766 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3767 intel_dp->psr_dpcd,
3768 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003769 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3770 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003771 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003772 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303773
3774 if (INTEL_INFO(dev)->gen >= 9 &&
3775 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3776 uint8_t frame_sync_cap;
3777
3778 dev_priv->psr.sink_support = true;
3779 intel_dp_dpcd_read_wake(&intel_dp->aux,
3780 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3781 &frame_sync_cap, 1);
3782 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3783 /* PSR2 needs frame sync as well */
3784 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3785			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3786 dev_priv->psr.psr2_support ? "supported" : "not supported");
3787 }
Jani Nikula50003932013-09-20 16:42:17 +03003788 }
3789
Jani Nikulabc5133d2015-09-03 11:16:07 +03003790 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03003791 yesno(intel_dp_source_supports_hbr2(intel_dp)),
Jani Nikula742f4912015-09-03 11:16:09 +03003792 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07003793
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303794 /* Intermediate frequency support */
3795 if (is_edp(intel_dp) &&
3796 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3797 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3798	    (rev >= 0x03)) { /* eDP v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003799 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003800 int i;
3801
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303802 intel_dp_dpcd_read_wake(&intel_dp->aux,
3803 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003804 sink_rates,
3805 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003806
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003807 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3808 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003809
3810 if (val == 0)
3811 break;
3812
Sonika Jindalaf77b972015-05-07 13:59:28 +05303813			/* Value read is in 200 kHz units, drm clock is saved in deca-kHz */
3814 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003815 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003816 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303817 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003818
3819 intel_dp_print_rates(intel_dp);
3820
Adam Jacksonedb39242012-09-18 10:58:49 -04003821 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3822 DP_DWN_STRM_PORT_PRESENT))
3823 return true; /* native DP sink */
3824
3825 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3826 return true; /* no per-port downstream info */
3827
Jani Nikula9d1a1032014-03-14 16:51:15 +02003828 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3829 intel_dp->downstream_ports,
3830 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003831 return false; /* downstream port status fetch failed */
3832
3833 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003834}
3835
Adam Jackson0d198322012-05-14 16:05:47 -04003836static void
3837intel_dp_probe_oui(struct intel_dp *intel_dp)
3838{
3839 u8 buf[3];
3840
3841 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3842 return;
3843
Jani Nikula9d1a1032014-03-14 16:51:15 +02003844 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003845 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3846 buf[0], buf[1], buf[2]);
3847
Jani Nikula9d1a1032014-03-14 16:51:15 +02003848 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003849 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3850 buf[0], buf[1], buf[2]);
3851}
3852
Dave Airlie0e32b392014-05-02 14:02:48 +10003853static bool
3854intel_dp_probe_mst(struct intel_dp *intel_dp)
3855{
3856 u8 buf[1];
3857
3858 if (!intel_dp->can_mst)
3859 return false;
3860
3861 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3862 return false;
3863
Dave Airlie0e32b392014-05-02 14:02:48 +10003864 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3865 if (buf[0] & DP_MST_CAP) {
3866 DRM_DEBUG_KMS("Sink is MST capable\n");
3867 intel_dp->is_mst = true;
3868 } else {
3869 DRM_DEBUG_KMS("Sink is not MST capable\n");
3870 intel_dp->is_mst = false;
3871 }
3872 }
Dave Airlie0e32b392014-05-02 14:02:48 +10003873
3874 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3875 return intel_dp->is_mst;
3876}
3877
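/*
 * Sink CRC support: DP_TEST_SINK_MISC advertises whether the sink can
 * compute CRCs, DP_TEST_SINK_START toggles the calculation, and the six
 * CRC bytes are read back from DP_TEST_CRC_R_CR. IPS is turned off for
 * the duration of the test. A debugfs-style caller is expected to do
 * roughly this (illustrative sketch only):
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
 *			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
 */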
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003878static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003879{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003880 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3881 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003882 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003883 int ret = 0;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003884
3885 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003886 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003887 ret = -EIO;
3888 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003889 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003890
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003891 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003892 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003893 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003894 ret = -EIO;
3895 goto out;
3896 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003897
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003898 intel_dp->sink_crc.started = false;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003899 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003900 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003901 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003902}
3903
3904static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3905{
3906 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3907 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3908 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003909 int ret;
3910
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003911 if (intel_dp->sink_crc.started) {
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003912 ret = intel_dp_sink_crc_stop(intel_dp);
3913 if (ret)
3914 return ret;
3915 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003916
3917 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3918 return -EIO;
3919
3920 if (!(buf & DP_TEST_CRC_SUPPORTED))
3921 return -ENOTTY;
3922
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003923 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3924
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003925 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3926 return -EIO;
3927
3928 hsw_disable_ips(intel_crtc);
3929
3930 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3931 buf | DP_TEST_SINK_START) < 0) {
3932 hsw_enable_ips(intel_crtc);
3933 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003934 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003935
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003936 intel_dp->sink_crc.started = true;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003937 return 0;
3938}
3939
3940int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3941{
3942 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3943 struct drm_device *dev = dig_port->base.base.dev;
3944 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3945 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003946 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003947 int attempts = 6;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003948 bool old_equal_new;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003949
3950 ret = intel_dp_sink_crc_start(intel_dp);
3951 if (ret)
3952 return ret;
3953
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003954 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003955 intel_wait_for_vblank(dev, intel_crtc->pipe);
3956
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07003957 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003958 DP_TEST_SINK_MISC, &buf) < 0) {
3959 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003960 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003961 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003962 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003963
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003964 /*
3965		 * Count might be reset during the loop. In this case the
3966		 * last known count needs to be reset as well.
3967 */
3968 if (count == 0)
3969 intel_dp->sink_crc.last_count = 0;
3970
3971 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3972 ret = -EIO;
3973 goto stop;
3974 }
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003975
3976 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3977 !memcmp(intel_dp->sink_crc.last_crc, crc,
3978 6 * sizeof(u8)));
3979
3980 } while (--attempts && (count == 0 || old_equal_new));
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003981
3982 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3983 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003984
3985 if (attempts == 0) {
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003986 if (old_equal_new) {
3987 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3988 } else {
3989 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3990 ret = -ETIMEDOUT;
3991 goto stop;
3992 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003993 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003994
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003995stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003996 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003997 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003998}
3999
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004000static bool
4001intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4002{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004003 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4004 DP_DEVICE_SERVICE_IRQ_VECTOR,
4005 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004006}
4007
Dave Airlie0e32b392014-05-02 14:02:48 +10004008static bool
4009intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4010{
4011 int ret;
4012
4013 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4014 DP_SINK_COUNT_ESI,
4015 sink_irq_vector, 14);
4016 if (ret != 14)
4017 return false;
4018
4019 return true;
4020}
4021
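/*
 * DisplayPort compliance test handlers. intel_dp_handle_test_request()
 * reads the requested test from DP_TEST_REQUEST, runs the matching
 * handler and writes the ACK/NAK result to DP_TEST_RESPONSE. At this
 * point only the EDID read test does real work; link training is simply
 * ACKed and the video/PHY pattern tests are NAKed.
 */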
Todd Previtec5d5ab72015-04-15 08:38:38 -07004022static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004023{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004024 uint8_t test_result = DP_TEST_ACK;
4025 return test_result;
4026}
4027
4028static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4029{
4030 uint8_t test_result = DP_TEST_NAK;
4031 return test_result;
4032}
4033
4034static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4035{
4036 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004037 struct intel_connector *intel_connector = intel_dp->attached_connector;
4038 struct drm_connector *connector = &intel_connector->base;
4039
4040 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004041 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004042 intel_dp->aux.i2c_defer_count > 6) {
4043 /* Check EDID read for NACKs, DEFERs and corruption
4044 * (DP CTS 1.2 Core r1.1)
4045 * 4.2.2.4 : Failed EDID read, I2C_NAK
4046 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4047 * 4.2.2.6 : EDID corruption detected
4048 * Use failsafe mode for all cases
4049 */
4050 if (intel_dp->aux.i2c_nack_count > 0 ||
4051 intel_dp->aux.i2c_defer_count > 0)
4052 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4053 intel_dp->aux.i2c_nack_count,
4054 intel_dp->aux.i2c_defer_count);
4055 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4056 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304057 struct edid *block = intel_connector->detect_edid;
4058
4059 /* We have to write the checksum
4060 * of the last block read
4061 */
4062 block += intel_connector->detect_edid->extensions;
4063
Todd Previte559be302015-05-04 07:48:20 -07004064 if (!drm_dp_dpcd_write(&intel_dp->aux,
4065 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304066 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004067 1))
Todd Previte559be302015-05-04 07:48:20 -07004068 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4069
4070 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4071 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4072 }
4073
4074 /* Set test active flag here so userspace doesn't interrupt things */
4075 intel_dp->compliance_test_active = 1;
4076
Todd Previtec5d5ab72015-04-15 08:38:38 -07004077 return test_result;
4078}
4079
4080static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4081{
4082 uint8_t test_result = DP_TEST_NAK;
4083 return test_result;
4084}
4085
4086static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4087{
4088 uint8_t response = DP_TEST_NAK;
4089 uint8_t rxdata = 0;
4090 int status = 0;
4091
Todd Previte559be302015-05-04 07:48:20 -07004092 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004093 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004094 intel_dp->compliance_test_data = 0;
4095
Todd Previtec5d5ab72015-04-15 08:38:38 -07004096 intel_dp->aux.i2c_nack_count = 0;
4097 intel_dp->aux.i2c_defer_count = 0;
4098
4099 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4100 if (status <= 0) {
4101 DRM_DEBUG_KMS("Could not read test request from sink\n");
4102 goto update_status;
4103 }
4104
4105 switch (rxdata) {
4106 case DP_TEST_LINK_TRAINING:
4107 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4108 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4109 response = intel_dp_autotest_link_training(intel_dp);
4110 break;
4111 case DP_TEST_LINK_VIDEO_PATTERN:
4112 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4113 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4114 response = intel_dp_autotest_video_pattern(intel_dp);
4115 break;
4116 case DP_TEST_LINK_EDID_READ:
4117 DRM_DEBUG_KMS("EDID test requested\n");
4118 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4119 response = intel_dp_autotest_edid(intel_dp);
4120 break;
4121 case DP_TEST_LINK_PHY_TEST_PATTERN:
4122 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4123 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4124 response = intel_dp_autotest_phy_pattern(intel_dp);
4125 break;
4126 default:
4127 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4128 break;
4129 }
4130
4131update_status:
4132 status = drm_dp_dpcd_write(&intel_dp->aux,
4133 DP_TEST_RESPONSE,
4134 &response, 1);
4135 if (status <= 0)
4136 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004137}
4138
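/*
 * Service an MST sink interrupt: read the ESI (event status indicator)
 * block, retrain if channel EQ has been lost, hand the ESI to the MST
 * manager and ack the handled bits back through DP_SINK_COUNT_ESI. If
 * the ESI read fails the device is assumed gone and MST mode is torn
 * down.
 */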
Dave Airlie0e32b392014-05-02 14:02:48 +10004139static int
4140intel_dp_check_mst_status(struct intel_dp *intel_dp)
4141{
4142 bool bret;
4143
4144 if (intel_dp->is_mst) {
4145 u8 esi[16] = { 0 };
4146 int ret = 0;
4147 int retry;
4148 bool handled;
4149 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4150go_again:
4151		if (bret) {
4152
4153 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004154 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004155 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004156 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4157 intel_dp_start_link_train(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004158 intel_dp_stop_link_train(intel_dp);
4159 }
4160
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004161 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004162 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4163
4164 if (handled) {
4165 for (retry = 0; retry < 3; retry++) {
4166 int wret;
4167 wret = drm_dp_dpcd_write(&intel_dp->aux,
4168 DP_SINK_COUNT_ESI+1,
4169 &esi[1], 3);
4170 if (wret == 3) {
4171 break;
4172 }
4173 }
4174
4175 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4176				if (bret) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004177 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004178 goto go_again;
4179 }
4180 } else
4181 ret = 0;
4182
4183 return ret;
4184 } else {
4185 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4186 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4187 intel_dp->is_mst = false;
4188 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4189 /* send a hotplug event */
4190 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4191 }
4192 }
4193 return -EINVAL;
4194}
4195
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004196/*
4197 * According to DP spec
4198 * 5.1.2:
4199 * 1. Read DPCD
4200 * 2. Configure link according to Receiver Capabilities
4201 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4202 * 4. Check link status on receipt of hot-plug interrupt
4203 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004204static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004205intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004206{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004207 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004208 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004209 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004210 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004211
Dave Airlie5b215bc2014-08-05 10:40:20 +10004212 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4213
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004214 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004215 return;
4216
Imre Deak1a125d82014-08-18 14:42:46 +03004217 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4218 return;
4219
Keith Packard92fd8fd2011-07-25 19:50:10 -07004220 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004221 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004222 return;
4223 }
4224
Keith Packard92fd8fd2011-07-25 19:50:10 -07004225 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004226 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004227 return;
4228 }
4229
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004230 /* Try to read the source of the interrupt */
4231 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4232 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4233 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004234 drm_dp_dpcd_writeb(&intel_dp->aux,
4235 DP_DEVICE_SERVICE_IRQ_VECTOR,
4236 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004237
4238 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004239 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004240 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4241 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4242 }
4243
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004244 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004245 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004246 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004247 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004248 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004249 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004250}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004251
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004252/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004253static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004254intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004255{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004256 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004257 uint8_t type;
4258
4259 if (!intel_dp_get_dpcd(intel_dp))
4260 return connector_status_disconnected;
4261
4262 /* if there's no downstream port, we're done */
4263 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004264 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004265
4266 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004267 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4268 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004269 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004270
4271 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4272 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004273 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004274
Adam Jackson23235172012-09-20 16:42:45 -04004275 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4276 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004277 }
4278
4279 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004280 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004281 return connector_status_connected;
4282
4283 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004284 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4285 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4286 if (type == DP_DS_PORT_TYPE_VGA ||
4287 type == DP_DS_PORT_TYPE_NON_EDID)
4288 return connector_status_unknown;
4289 } else {
4290 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4291 DP_DWN_STRM_PORT_TYPE_MASK;
4292 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4293 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4294 return connector_status_unknown;
4295 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004296
4297 /* Anything else is out of spec, warn and ignore */
4298 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004299 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004300}
4301
4302static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004303edp_detect(struct intel_dp *intel_dp)
4304{
4305 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4306 enum drm_connector_status status;
4307
4308 status = intel_panel_detect(dev);
4309 if (status == connector_status_unknown)
4310 status = connector_status_connected;
4311
4312 return status;
4313}
4314
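/*
 * Hotplug live status helpers: each one reads the platform specific
 * live state bit for the given port (SDEISR on PCH platforms,
 * PORT_HOTPLUG_STAT on g4x/vlv, GEN8_DE_PORT_ISR on Broxton). They are
 * dispatched from intel_digital_port_connected() further down.
 */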
Jani Nikulab93433c2015-08-20 10:47:36 +03004315static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4316 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004317{
Jani Nikulab93433c2015-08-20 10:47:36 +03004318 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004319
Jani Nikula0df53b72015-08-20 10:47:40 +03004320 switch (port->port) {
4321 case PORT_A:
4322 return true;
4323 case PORT_B:
4324 bit = SDE_PORTB_HOTPLUG;
4325 break;
4326 case PORT_C:
4327 bit = SDE_PORTC_HOTPLUG;
4328 break;
4329 case PORT_D:
4330 bit = SDE_PORTD_HOTPLUG;
4331 break;
4332 default:
4333 MISSING_CASE(port->port);
4334 return false;
4335 }
4336
4337 return I915_READ(SDEISR) & bit;
4338}
4339
4340static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4341 struct intel_digital_port *port)
4342{
4343 u32 bit;
4344
4345 switch (port->port) {
4346 case PORT_A:
4347 return true;
4348 case PORT_B:
4349 bit = SDE_PORTB_HOTPLUG_CPT;
4350 break;
4351 case PORT_C:
4352 bit = SDE_PORTC_HOTPLUG_CPT;
4353 break;
4354 case PORT_D:
4355 bit = SDE_PORTD_HOTPLUG_CPT;
4356 break;
Jani Nikulaa78695d2015-09-18 15:54:50 +03004357 case PORT_E:
4358 bit = SDE_PORTE_HOTPLUG_SPT;
4359 break;
Jani Nikula0df53b72015-08-20 10:47:40 +03004360 default:
4361 MISSING_CASE(port->port);
4362 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004363 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004364
Jani Nikulab93433c2015-08-20 10:47:36 +03004365 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004366}
4367
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004368static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004369 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004370{
Jani Nikula9642c812015-08-20 10:47:41 +03004371 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004372
Jani Nikula9642c812015-08-20 10:47:41 +03004373 switch (port->port) {
4374 case PORT_B:
4375 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4376 break;
4377 case PORT_C:
4378 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4379 break;
4380 case PORT_D:
4381 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4382 break;
4383 default:
4384 MISSING_CASE(port->port);
4385 return false;
4386 }
4387
4388 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4389}
4390
4391static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4392 struct intel_digital_port *port)
4393{
4394 u32 bit;
4395
4396 switch (port->port) {
4397 case PORT_B:
4398 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4399 break;
4400 case PORT_C:
4401 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4402 break;
4403 case PORT_D:
4404 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4405 break;
4406 default:
4407 MISSING_CASE(port->port);
4408 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004409 }
4410
Jani Nikula1d245982015-08-20 10:47:37 +03004411 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004412}
4413
Jani Nikulae464bfd2015-08-20 10:47:42 +03004414static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304415 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004416{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304417 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4418 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004419 u32 bit;
4420
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304421 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4422 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004423 case PORT_A:
4424 bit = BXT_DE_PORT_HP_DDIA;
4425 break;
4426 case PORT_B:
4427 bit = BXT_DE_PORT_HP_DDIB;
4428 break;
4429 case PORT_C:
4430 bit = BXT_DE_PORT_HP_DDIC;
4431 break;
4432 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304433 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004434 return false;
4435 }
4436
4437 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4438}
4439
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004440/**
4441 * intel_digital_port_connected - is the specified port connected?
4442 * @dev_priv: i915 private structure
4443 * @port: the port to test
4444 *
4445 * Return %true if @port is connected, %false otherwise.
4446 */
Sonika Jindal237ed862015-09-15 09:44:20 +05304447bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004448 struct intel_digital_port *port)
4449{
Jani Nikula0df53b72015-08-20 10:47:40 +03004450 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004451 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004452	else if (HAS_PCH_SPLIT(dev_priv))
4453 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004454 else if (IS_BROXTON(dev_priv))
4455 return bxt_digital_port_connected(dev_priv, port);
Jani Nikula9642c812015-08-20 10:47:41 +03004456 else if (IS_VALLEYVIEW(dev_priv))
4457 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004458 else
4459 return g4x_digital_port_connected(dev_priv, port);
4460}
4461
Dave Airlie2a592be2014-09-01 16:58:12 +10004462static enum drm_connector_status
Jani Nikulab93433c2015-08-20 10:47:36 +03004463ironlake_dp_detect(struct intel_dp *intel_dp)
4464{
4465 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4466 struct drm_i915_private *dev_priv = dev->dev_private;
4467 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4468
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004469 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
Jani Nikulab93433c2015-08-20 10:47:36 +03004470 return connector_status_disconnected;
4471
4472 return intel_dp_detect_dpcd(intel_dp);
4473}
4474
4475static enum drm_connector_status
Dave Airlie2a592be2014-09-01 16:58:12 +10004476g4x_dp_detect(struct intel_dp *intel_dp)
4477{
4478 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Dave Airlie2a592be2014-09-01 16:58:12 +10004480
4481 /* Can't disconnect eDP, but you can close the lid... */
4482 if (is_edp(intel_dp)) {
4483 enum drm_connector_status status;
4484
4485 status = intel_panel_detect(dev);
4486 if (status == connector_status_unknown)
4487 status = connector_status_connected;
4488 return status;
4489 }
4490
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004491 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004492 return connector_status_disconnected;
4493
Keith Packard26d61aa2011-07-25 20:01:09 -07004494 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004495}
4496
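/*
 * EDID handling for detect: intel_dp_set_edid() stashes the EDID (or the
 * pre-cached panel EDID) in intel_connector->detect_edid and derives the
 * audio capability from it; intel_dp_unset_edid() drops the cache again.
 */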
Keith Packard8c241fe2011-09-28 16:38:44 -07004497static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004498intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004499{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004500 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004501
Jani Nikula9cd300e2012-10-19 14:51:52 +03004502 /* use cached edid if we have one */
4503 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004504 /* invalid edid */
4505 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004506 return NULL;
4507
Jani Nikula55e9ede2013-10-01 10:38:54 +03004508 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004509 } else
4510 return drm_get_edid(&intel_connector->base,
4511 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004512}
4513
Chris Wilsonbeb60602014-09-02 20:04:00 +01004514static void
4515intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004516{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004517 struct intel_connector *intel_connector = intel_dp->attached_connector;
4518 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004519
Chris Wilsonbeb60602014-09-02 20:04:00 +01004520 edid = intel_dp_get_edid(intel_dp);
4521 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004522
Chris Wilsonbeb60602014-09-02 20:04:00 +01004523 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4524 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4525 else
4526 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4527}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004528
Chris Wilsonbeb60602014-09-02 20:04:00 +01004529static void
4530intel_dp_unset_edid(struct intel_dp *intel_dp)
4531{
4532 struct intel_connector *intel_connector = intel_dp->attached_connector;
4533
4534 kfree(intel_connector->detect_edid);
4535 intel_connector->detect_edid = NULL;
4536
4537 intel_dp->has_audio = false;
4538}
4539
4540static enum intel_display_power_domain
4541intel_dp_power_get(struct intel_dp *dp)
4542{
4543 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4544 enum intel_display_power_domain power_domain;
4545
4546 power_domain = intel_display_port_power_domain(encoder);
4547 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4548
4549 return power_domain;
4550}
4551
4552static void
4553intel_dp_power_put(struct intel_dp *dp,
4554 enum intel_display_power_domain power_domain)
4555{
4556 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4557 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004558}
4559
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004560static enum drm_connector_status
4561intel_dp_detect(struct drm_connector *connector, bool force)
4562{
4563 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004564 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4565 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004566 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004567 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004568 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004569 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004570 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004571
Chris Wilson164c8592013-07-20 20:27:08 +01004572 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004573 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004574 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004575
Dave Airlie0e32b392014-05-02 14:02:48 +10004576 if (intel_dp->is_mst) {
4577 /* MST devices are disconnected from a monitor POV */
4578 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4579 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004580 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004581 }
4582
Chris Wilsonbeb60602014-09-02 20:04:00 +01004583 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004584
Chris Wilsond410b562014-09-02 20:03:59 +01004585 /* Can't disconnect eDP, but you can close the lid... */
4586 if (is_edp(intel_dp))
4587 status = edp_detect(intel_dp);
4588 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004589 status = ironlake_dp_detect(intel_dp);
4590 else
4591 status = g4x_dp_detect(intel_dp);
4592 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004593 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004594
Adam Jackson0d198322012-05-14 16:05:47 -04004595 intel_dp_probe_oui(intel_dp);
4596
Dave Airlie0e32b392014-05-02 14:02:48 +10004597 ret = intel_dp_probe_mst(intel_dp);
4598 if (ret) {
4599		/* If we are in MST mode then this connector won't
4600		 * appear connected or have anything with EDID on it */
4601 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4602 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4603 status = connector_status_disconnected;
4604 goto out;
4605 }
4606
Chris Wilsonbeb60602014-09-02 20:04:00 +01004607 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004608
Paulo Zanonid63885d2012-10-26 19:05:49 -02004609 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4610 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004611 status = connector_status_connected;
4612
Todd Previte09b1eb12015-04-20 15:27:34 -07004613 /* Try to read the source of the interrupt */
4614 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4615 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4616 /* Clear interrupt source */
4617 drm_dp_dpcd_writeb(&intel_dp->aux,
4618 DP_DEVICE_SERVICE_IRQ_VECTOR,
4619 sink_irq_vector);
4620
4621 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4622 intel_dp_handle_test_request(intel_dp);
4623 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4624 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4625 }
4626
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004627out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004628 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004629 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004630}
4631
Chris Wilsonbeb60602014-09-02 20:04:00 +01004632static void
4633intel_dp_force(struct drm_connector *connector)
4634{
4635 struct intel_dp *intel_dp = intel_attached_dp(connector);
4636 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4637 enum intel_display_power_domain power_domain;
4638
4639 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4640 connector->base.id, connector->name);
4641 intel_dp_unset_edid(intel_dp);
4642
4643 if (connector->status != connector_status_connected)
4644 return;
4645
4646 power_domain = intel_dp_power_get(intel_dp);
4647
4648 intel_dp_set_edid(intel_dp);
4649
4650 intel_dp_power_put(intel_dp, power_domain);
4651
4652 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4653 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4654}
4655
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004656static int intel_dp_get_modes(struct drm_connector *connector)
4657{
Jani Nikuladd06f902012-10-19 14:51:50 +03004658 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004659 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004660
Chris Wilsonbeb60602014-09-02 20:04:00 +01004661 edid = intel_connector->detect_edid;
4662 if (edid) {
4663 int ret = intel_connector_update_modes(connector, edid);
4664 if (ret)
4665 return ret;
4666 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004667
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004668 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004669 if (is_edp(intel_attached_dp(connector)) &&
4670 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004671 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004672
4673 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004674 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004675 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004676 drm_mode_probed_add(connector, mode);
4677 return 1;
4678 }
4679 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004680
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004681 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004682}
4683
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004684static bool
4685intel_dp_detect_audio(struct drm_connector *connector)
4686{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004687 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004688 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004689
Chris Wilsonbeb60602014-09-02 20:04:00 +01004690 edid = to_intel_connector(connector)->detect_edid;
4691 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004692 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004693
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004694 return has_audio;
4695}
4696
Chris Wilsonf6849602010-09-19 09:29:33 +01004697static int
4698intel_dp_set_property(struct drm_connector *connector,
4699 struct drm_property *property,
4700 uint64_t val)
4701{
Chris Wilsone953fd72011-02-21 22:23:52 +00004702 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004703 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004704 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4705 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004706 int ret;
4707
Rob Clark662595d2012-10-11 20:36:04 -05004708 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004709 if (ret)
4710 return ret;
4711
Chris Wilson3f43c482011-05-12 22:17:24 +01004712 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004713 int i = val;
4714 bool has_audio;
4715
4716 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004717 return 0;
4718
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004719 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004720
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004721 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004722 has_audio = intel_dp_detect_audio(connector);
4723 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004724 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004725
4726 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004727 return 0;
4728
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004729 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004730 goto done;
4731 }
4732
Chris Wilsone953fd72011-02-21 22:23:52 +00004733 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004734 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004735 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004736
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004737 switch (val) {
4738 case INTEL_BROADCAST_RGB_AUTO:
4739 intel_dp->color_range_auto = true;
4740 break;
4741 case INTEL_BROADCAST_RGB_FULL:
4742 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004743 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004744 break;
4745 case INTEL_BROADCAST_RGB_LIMITED:
4746 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004747 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004748 break;
4749 default:
4750 return -EINVAL;
4751 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004752
4753 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004754 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004755 return 0;
4756
Chris Wilsone953fd72011-02-21 22:23:52 +00004757 goto done;
4758 }
4759
Yuly Novikov53b41832012-10-26 12:04:00 +03004760 if (is_edp(intel_dp) &&
4761 property == connector->dev->mode_config.scaling_mode_property) {
4762 if (val == DRM_MODE_SCALE_NONE) {
4763 DRM_DEBUG_KMS("no scaling not supported\n");
4764 return -EINVAL;
4765 }
4766
4767 if (intel_connector->panel.fitting_mode == val) {
4768 /* the eDP scaling property is not changed */
4769 return 0;
4770 }
4771 intel_connector->panel.fitting_mode = val;
4772
4773 goto done;
4774 }
4775
Chris Wilsonf6849602010-09-19 09:29:33 +01004776 return -EINVAL;
4777
4778done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004779 if (intel_encoder->base.crtc)
4780 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004781
4782 return 0;
4783}
4784
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004785static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004786intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004787{
Jani Nikula1d508702012-10-19 14:51:49 +03004788 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004789
Chris Wilson10e972d2014-09-04 21:43:45 +01004790 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004791
Jani Nikula9cd300e2012-10-19 14:51:52 +03004792 if (!IS_ERR_OR_NULL(intel_connector->edid))
4793 kfree(intel_connector->edid);
4794
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004795 /* Can't call is_edp() since the encoder may have been destroyed
4796 * already. */
4797 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004798 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004799
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004800 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004801 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004802}
4803
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004804void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004805{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004806 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4807 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004808
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02004809 intel_dp_aux_fini(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004810 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004811 if (is_edp(intel_dp)) {
4812 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004813 /*
	4814	 * vdd might still be enabled due to the delayed vdd off.
4815 * Make sure vdd is actually turned off here.
4816 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004817 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004818 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004819 pps_unlock(intel_dp);
4820
Clint Taylor01527b32014-07-07 13:01:46 -07004821 if (intel_dp->edp_notifier.notifier_call) {
4822 unregister_reboot_notifier(&intel_dp->edp_notifier);
4823 intel_dp->edp_notifier.notifier_call = NULL;
4824 }
Keith Packardbd943152011-09-18 23:09:52 -07004825 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004826 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004827 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004828}
4829
Imre Deak07f9cd02014-08-18 14:42:45 +03004830static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4831{
4832 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4833
4834 if (!is_edp(intel_dp))
4835 return;
4836
Ville Syrjälä951468f2014-09-04 14:55:31 +03004837 /*
	4838	 * vdd might still be enabled due to the delayed vdd off.
4839 * Make sure vdd is actually turned off here.
4840 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004841 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004842 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004843 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004844 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004845}
4846
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004847static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4848{
4849 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4850 struct drm_device *dev = intel_dig_port->base.base.dev;
4851 struct drm_i915_private *dev_priv = dev->dev_private;
4852 enum intel_display_power_domain power_domain;
4853
4854 lockdep_assert_held(&dev_priv->pps_mutex);
4855
4856 if (!edp_have_panel_vdd(intel_dp))
4857 return;
4858
4859 /*
4860 * The VDD bit needs a power domain reference, so if the bit is
4861 * already enabled when we boot or resume, grab this reference and
4862 * schedule a vdd off, so we don't hold on to the reference
4863 * indefinitely.
4864 */
4865 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4866 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4867 intel_display_power_get(dev_priv, power_domain);
4868
4869 edp_panel_vdd_schedule_off(intel_dp);
4870}
4871
Imre Deak6d93c0c2014-07-31 14:03:36 +03004872static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4873{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004874 struct intel_dp *intel_dp;
4875
4876 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4877 return;
4878
4879 intel_dp = enc_to_intel_dp(encoder);
4880
4881 pps_lock(intel_dp);
4882
4883 /*
4884 * Read out the current power sequencer assignment,
4885 * in case the BIOS did something with it.
4886 */
4887 if (IS_VALLEYVIEW(encoder->dev))
4888 vlv_initial_power_sequencer_setup(intel_dp);
4889
4890 intel_edp_panel_vdd_sanitize(intel_dp);
4891
4892 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004893}
4894
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004895static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004896 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004897 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004898 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004899 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004900 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004901 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004902 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004903 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004904 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004905};
4906
4907static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4908 .get_modes = intel_dp_get_modes,
4909 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004910 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004911};
4912
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004913static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004914 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004915 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004916};
4917
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004918enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004919intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4920{
4921 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004922 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004923 struct drm_device *dev = intel_dig_port->base.base.dev;
4924 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004925 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004926 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004927
Dave Airlie0e32b392014-05-02 14:02:48 +10004928 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4929 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004930
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004931 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4932 /*
4933 * vdd off can generate a long pulse on eDP which
4934 * would require vdd on to handle it, and thus we
4935 * would end up in an endless cycle of
4936 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4937 */
4938 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4939 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02004940 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004941 }
4942
Ville Syrjälä26fbb772014-08-11 18:37:37 +03004943 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4944 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10004945 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10004946
Imre Deak1c767b32014-08-18 14:42:42 +03004947 power_domain = intel_display_port_power_domain(intel_encoder);
4948 intel_display_power_get(dev_priv, power_domain);
4949
Dave Airlie0e32b392014-05-02 14:02:48 +10004950 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03004951 /* indicate that we need to restart link training */
4952 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10004953
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004954 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4955 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10004956
4957 if (!intel_dp_get_dpcd(intel_dp)) {
4958 goto mst_fail;
4959 }
4960
4961 intel_dp_probe_oui(intel_dp);
4962
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004963 if (!intel_dp_probe_mst(intel_dp)) {
4964 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4965 intel_dp_check_link_status(intel_dp);
4966 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004967 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004968 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004969 } else {
4970 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03004971 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10004972 goto mst_fail;
4973 }
4974
4975 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10004976 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10004977 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10004978 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004979 }
4980 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004981
4982 ret = IRQ_HANDLED;
4983
Imre Deak1c767b32014-08-18 14:42:42 +03004984 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10004985mst_fail:
	4986	/* if we were in MST mode and the device is no longer there, get out of MST mode */
4987 if (intel_dp->is_mst) {
4988 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4989 intel_dp->is_mst = false;
4990 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4991 }
Imre Deak1c767b32014-08-18 14:42:42 +03004992put_power:
4993 intel_display_power_put(dev_priv, power_domain);
4994
4995 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10004996}
4997
Zhenyu Wange3421a12010-04-08 09:43:27 +08004998/* Return which DP Port should be selected for Transcoder DP control */
4999int
Akshay Joshi0206e352011-08-16 15:34:10 -04005000intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08005001{
5002 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005003 struct intel_encoder *intel_encoder;
5004 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005005
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005006 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5007 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01005008
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005009 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5010 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01005011 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005012 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01005013
Zhenyu Wange3421a12010-04-08 09:43:27 +08005014 return -1;
5015}
5016
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005017/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005018bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005019{
5020 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005021 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005022 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005023 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005024 [PORT_B] = DVO_PORT_DPB,
5025 [PORT_C] = DVO_PORT_DPC,
5026 [PORT_D] = DVO_PORT_DPD,
5027 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005028 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005029
Ville Syrjälä53ce81a2015-09-11 21:04:38 +03005030 /*
	5031	 * eDP is not supported on g4x, so bail out early just
	5032	 * for a bit of extra safety in case the VBT is bonkers.
5033 */
5034 if (INTEL_INFO(dev)->gen < 5)
5035 return false;
5036
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005037 if (port == PORT_A)
5038 return true;
5039
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005040 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005041 return false;
5042
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005043 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5044 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005045
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005046 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005047 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5048 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005049 return true;
5050 }
5051 return false;
5052}
5053
Dave Airlie0e32b392014-05-02 14:02:48 +10005054void
Chris Wilsonf6849602010-09-19 09:29:33 +01005055intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5056{
Yuly Novikov53b41832012-10-26 12:04:00 +03005057 struct intel_connector *intel_connector = to_intel_connector(connector);
5058
Chris Wilson3f43c482011-05-12 22:17:24 +01005059 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005060 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005061 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005062
5063 if (is_edp(intel_dp)) {
5064 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005065 drm_object_attach_property(
5066 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005067 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005068 DRM_MODE_SCALE_ASPECT);
5069 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005070 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005071}
5072
Imre Deakdada1a92014-01-29 13:25:41 +02005073static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5074{
5075 intel_dp->last_power_cycle = jiffies;
5076 intel_dp->last_power_on = jiffies;
5077 intel_dp->last_backlight_off = jiffies;
5078}
5079
Daniel Vetter67a54562012-10-20 20:57:45 +02005080static void
5081intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005082 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005083{
5084 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005085 struct edp_power_seq cur, vbt, spec,
5086 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305087 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5088 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005089
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005090 lockdep_assert_held(&dev_priv->pps_mutex);
5091
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005092 /* already initialized? */
5093 if (final->t11_t12 != 0)
5094 return;
5095
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305096 if (IS_BROXTON(dev)) {
5097 /*
	5098		 * The correct register for Broxton needs to be identified
	5099		 * using VBT; hardcoded for now.
5100 * using VBT. hardcoding for now
5101 */
5102 pp_ctrl_reg = BXT_PP_CONTROL(0);
5103 pp_on_reg = BXT_PP_ON_DELAYS(0);
5104 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5105 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005106 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005107 pp_on_reg = PCH_PP_ON_DELAYS;
5108 pp_off_reg = PCH_PP_OFF_DELAYS;
5109 pp_div_reg = PCH_PP_DIVISOR;
5110 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005111 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5112
5113 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5114 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5115 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5116 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005117 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005118
5119 /* Workaround: Need to write PP_CONTROL with the unlock key as
5120 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305121 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005122
Jesse Barnes453c5422013-03-28 09:55:41 -07005123 pp_on = I915_READ(pp_on_reg);
5124 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305125 if (!IS_BROXTON(dev)) {
5126 I915_WRITE(pp_ctrl_reg, pp_ctl);
5127 pp_div = I915_READ(pp_div_reg);
5128 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005129
5130 /* Pull timing values out of registers */
5131 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5132 PANEL_POWER_UP_DELAY_SHIFT;
5133
5134 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5135 PANEL_LIGHT_ON_DELAY_SHIFT;
5136
5137 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5138 PANEL_LIGHT_OFF_DELAY_SHIFT;
5139
5140 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5141 PANEL_POWER_DOWN_DELAY_SHIFT;
5142
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305143 if (IS_BROXTON(dev)) {
5144 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5145 BXT_POWER_CYCLE_DELAY_SHIFT;
5146 if (tmp > 0)
5147 cur.t11_t12 = (tmp - 1) * 1000;
5148 else
5149 cur.t11_t12 = 0;
5150 } else {
5151 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005152 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305153 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005154
5155 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5156 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5157
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005158 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005159
5160 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5161 * our hw here, which are all in 100usec. */
5162 spec.t1_t3 = 210 * 10;
5163 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5164 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5165 spec.t10 = 500 * 10;
	5166	/* This one is special, being actually in units of 100ms, but
	5167	 * zero-based in the hw (so we need to add 100 ms). The sw vbt
	5168	 * table multiplies it by 1000 to put it in units of 100usec,
	5169	 * too. */
5170 spec.t11_t12 = (510 + 100) * 10;
5171
5172 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5173 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5174
5175 /* Use the max of the register settings and vbt. If both are
5176 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005177#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005178 spec.field : \
5179 max(cur.field, vbt.field))
5180 assign_final(t1_t3);
5181 assign_final(t8);
5182 assign_final(t9);
5183 assign_final(t10);
5184 assign_final(t11_t12);
5185#undef assign_final
5186
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005187#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005188 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5189 intel_dp->backlight_on_delay = get_delay(t8);
5190 intel_dp->backlight_off_delay = get_delay(t9);
5191 intel_dp->panel_power_down_delay = get_delay(t10);
5192 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5193#undef get_delay
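	/*
	 * Worked example (illustrative only): if both the current register
	 * values and the VBT are zero, the spec fallbacks above are used,
	 * so final->t1_t3 = 210 * 10 = 2100 units of 100us and get_delay()
	 * gives DIV_ROUND_UP(2100, 10) = 210ms for panel_power_up_delay.
	 * Likewise final->t11_t12 = (510 + 100) * 10 = 6100 units, i.e. a
	 * 610ms panel_power_cycle_delay.
	 */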
5194
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005195 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5196 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5197 intel_dp->panel_power_cycle_delay);
5198
5199 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5200 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005201}
5202
5203static void
5204intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005205 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005206{
5207 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005208 u32 pp_on, pp_off, pp_div, port_sel = 0;
5209 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305210 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005211 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005212 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005213
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005214 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005215
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305216 if (IS_BROXTON(dev)) {
5217 /*
5218 * TODO: BXT has 2 sets of PPS registers.
	5219		 * The correct register for Broxton needs to be identified
	5220		 * using VBT; hardcoded for now.
5221 */
5222 pp_ctrl_reg = BXT_PP_CONTROL(0);
5223 pp_on_reg = BXT_PP_ON_DELAYS(0);
5224 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5225
5226 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005227 pp_on_reg = PCH_PP_ON_DELAYS;
5228 pp_off_reg = PCH_PP_OFF_DELAYS;
5229 pp_div_reg = PCH_PP_DIVISOR;
5230 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005231 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5232
5233 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5234 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5235 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005236 }
5237
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005238 /*
5239 * And finally store the new values in the power sequencer. The
5240 * backlight delays are set to 1 because we do manual waits on them. For
5241 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5242 * we'll end up waiting for the backlight off delay twice: once when we
5243 * do the manual sleep, and once when we disable the panel and wait for
5244 * the PP_STATUS bit to become zero.
5245 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005246 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005247 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5248 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005249 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005250 /* Compute the divisor for the pp clock, simply match the Bspec
5251 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305252 if (IS_BROXTON(dev)) {
5253 pp_div = I915_READ(pp_ctrl_reg);
5254 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5255 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5256 << BXT_POWER_CYCLE_DELAY_SHIFT);
5257 } else {
5258 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5259 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5260 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5261 }
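	/*
	 * Rough numbers for illustration (assuming a 125 MHz raw clock on a
	 * PCH platform, so div == 125): the reference divider field becomes
	 * (100 * 125) / 2 - 1 = 6249, and a seq->t11_t12 of 6100 (610ms in
	 * 100us units) yields DIV_ROUND_UP(6100, 1000) = 7, i.e. 700ms in
	 * the hardware's 100ms steps, so the rounding never programs a
	 * power cycle delay shorter than the one requested.
	 */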
Daniel Vetter67a54562012-10-20 20:57:45 +02005262
5263 /* Haswell doesn't have any port selection bits for the panel
5264 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005265 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005266 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005267 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005268 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005269 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005270 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005271 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005272 }
5273
Jesse Barnes453c5422013-03-28 09:55:41 -07005274 pp_on |= port_sel;
5275
5276 I915_WRITE(pp_on_reg, pp_on);
5277 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305278 if (IS_BROXTON(dev))
5279 I915_WRITE(pp_ctrl_reg, pp_div);
5280 else
5281 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005282
Daniel Vetter67a54562012-10-20 20:57:45 +02005283 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005284 I915_READ(pp_on_reg),
5285 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305286 IS_BROXTON(dev) ?
5287 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005288 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005289}
5290
Vandana Kannanb33a2812015-02-13 15:33:03 +05305291/**
5292 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5293 * @dev: DRM device
5294 * @refresh_rate: RR to be programmed
5295 *
5296 * This function gets called when refresh rate (RR) has to be changed from
5297 * one frequency to another. Switches can be between high and low RR
5298 * supported by the panel or to any other RR based on media playback (in
5299 * this case, RR value needs to be passed from user space).
5300 *
5301 * The caller of this function needs to take a lock on dev_priv->drrs.
5302 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305303static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305304{
5305 struct drm_i915_private *dev_priv = dev->dev_private;
5306 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305307 struct intel_digital_port *dig_port = NULL;
5308 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005309 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305310 struct intel_crtc *intel_crtc = NULL;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305311 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305312
5313 if (refresh_rate <= 0) {
5314 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5315 return;
5316 }
5317
Vandana Kannan96178ee2015-01-10 02:25:56 +05305318 if (intel_dp == NULL) {
5319 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305320 return;
5321 }
5322
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005323 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005324 * FIXME: This needs proper synchronization with psr state for some
5325 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005326 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305327
Vandana Kannan96178ee2015-01-10 02:25:56 +05305328 dig_port = dp_to_dig_port(intel_dp);
5329 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005330 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305331
5332 if (!intel_crtc) {
5333 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5334 return;
5335 }
5336
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005337 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305338
Vandana Kannan96178ee2015-01-10 02:25:56 +05305339 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305340 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5341 return;
5342 }
5343
Vandana Kannan96178ee2015-01-10 02:25:56 +05305344 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5345 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305346 index = DRRS_LOW_RR;
5347
Vandana Kannan96178ee2015-01-10 02:25:56 +05305348 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305349 DRM_DEBUG_KMS(
5350 "DRRS requested for previously set RR...ignoring\n");
5351 return;
5352 }
5353
5354 if (!intel_crtc->active) {
5355 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5356 return;
5357 }
5358
Durgadoss R44395bf2015-02-13 15:33:02 +05305359 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305360 switch (index) {
5361 case DRRS_HIGH_RR:
5362 intel_dp_set_m_n(intel_crtc, M1_N1);
5363 break;
5364 case DRRS_LOW_RR:
5365 intel_dp_set_m_n(intel_crtc, M2_N2);
5366 break;
5367 case DRRS_MAX_RR:
5368 default:
5369 DRM_ERROR("Unsupported refreshrate type\n");
5370 }
5371 } else if (INTEL_INFO(dev)->gen > 6) {
Ville Syrjälä649636e2015-09-22 19:50:01 +03005372 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5373 u32 val;
Vandana Kannana4c30b12015-02-13 15:33:00 +05305374
Ville Syrjälä649636e2015-09-22 19:50:01 +03005375 val = I915_READ(reg);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305376 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305377 if (IS_VALLEYVIEW(dev))
5378 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5379 else
5380 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305381 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305382 if (IS_VALLEYVIEW(dev))
5383 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5384 else
5385 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305386 }
5387 I915_WRITE(reg, val);
5388 }
5389
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305390 dev_priv->drrs.refresh_rate_type = index;
5391
5392 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5393}
5394
Vandana Kannanb33a2812015-02-13 15:33:03 +05305395/**
5396 * intel_edp_drrs_enable - init drrs struct if supported
5397 * @intel_dp: DP struct
5398 *
5399 * Initializes frontbuffer_bits and drrs.dp
5400 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305401void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5402{
5403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5404 struct drm_i915_private *dev_priv = dev->dev_private;
5405 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5406 struct drm_crtc *crtc = dig_port->base.base.crtc;
5407 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5408
5409 if (!intel_crtc->config->has_drrs) {
5410 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5411 return;
5412 }
5413
5414 mutex_lock(&dev_priv->drrs.mutex);
5415 if (WARN_ON(dev_priv->drrs.dp)) {
5416 DRM_ERROR("DRRS already enabled\n");
5417 goto unlock;
5418 }
5419
5420 dev_priv->drrs.busy_frontbuffer_bits = 0;
5421
5422 dev_priv->drrs.dp = intel_dp;
5423
5424unlock:
5425 mutex_unlock(&dev_priv->drrs.mutex);
5426}
5427
Vandana Kannanb33a2812015-02-13 15:33:03 +05305428/**
5429 * intel_edp_drrs_disable - Disable DRRS
5430 * @intel_dp: DP struct
5431 *
5432 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305433void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5434{
5435 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5436 struct drm_i915_private *dev_priv = dev->dev_private;
5437 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5438 struct drm_crtc *crtc = dig_port->base.base.crtc;
5439 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5440
5441 if (!intel_crtc->config->has_drrs)
5442 return;
5443
5444 mutex_lock(&dev_priv->drrs.mutex);
5445 if (!dev_priv->drrs.dp) {
5446 mutex_unlock(&dev_priv->drrs.mutex);
5447 return;
5448 }
5449
5450 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5451 intel_dp_set_drrs_state(dev_priv->dev,
5452 intel_dp->attached_connector->panel.
5453 fixed_mode->vrefresh);
5454
5455 dev_priv->drrs.dp = NULL;
5456 mutex_unlock(&dev_priv->drrs.mutex);
5457
5458 cancel_delayed_work_sync(&dev_priv->drrs.work);
5459}
5460
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305461static void intel_edp_drrs_downclock_work(struct work_struct *work)
5462{
5463 struct drm_i915_private *dev_priv =
5464 container_of(work, typeof(*dev_priv), drrs.work.work);
5465 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305466
Vandana Kannan96178ee2015-01-10 02:25:56 +05305467 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305468
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305469 intel_dp = dev_priv->drrs.dp;
5470
5471 if (!intel_dp)
5472 goto unlock;
5473
5474 /*
	5475	 * The delayed work can race with an invalidate, hence we need to
5476 * recheck.
5477 */
5478
5479 if (dev_priv->drrs.busy_frontbuffer_bits)
5480 goto unlock;
5481
5482 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5483 intel_dp_set_drrs_state(dev_priv->dev,
5484 intel_dp->attached_connector->panel.
5485 downclock_mode->vrefresh);
5486
5487unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305488 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305489}
5490
Vandana Kannanb33a2812015-02-13 15:33:03 +05305491/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305492 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305493 * @dev: DRM device
5494 * @frontbuffer_bits: frontbuffer plane tracking bits
5495 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305496 * This function gets called every time rendering on the given planes starts.
	5497	 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305498 *
5499 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5500 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305501void intel_edp_drrs_invalidate(struct drm_device *dev,
5502 unsigned frontbuffer_bits)
5503{
5504 struct drm_i915_private *dev_priv = dev->dev_private;
5505 struct drm_crtc *crtc;
5506 enum pipe pipe;
5507
Daniel Vetter9da7d692015-04-09 16:44:15 +02005508 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305509 return;
5510
Daniel Vetter88f933a2015-04-09 16:44:16 +02005511 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305512
Vandana Kannana93fad02015-01-10 02:25:59 +05305513 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005514 if (!dev_priv->drrs.dp) {
5515 mutex_unlock(&dev_priv->drrs.mutex);
5516 return;
5517 }
5518
Vandana Kannana93fad02015-01-10 02:25:59 +05305519 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5520 pipe = to_intel_crtc(crtc)->pipe;
5521
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005522 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5523 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5524
Ramalingam C0ddfd202015-06-15 20:50:05 +05305525 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005526 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305527 intel_dp_set_drrs_state(dev_priv->dev,
5528 dev_priv->drrs.dp->attached_connector->panel.
5529 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305530
Vandana Kannana93fad02015-01-10 02:25:59 +05305531 mutex_unlock(&dev_priv->drrs.mutex);
5532}
5533
Vandana Kannanb33a2812015-02-13 15:33:03 +05305534/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305535 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305536 * @dev: DRM device
5537 * @frontbuffer_bits: frontbuffer plane tracking bits
5538 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305539 * This function gets called every time rendering on the given planes has
	5540	 * completed or a flip on a crtc is completed, so DRRS should be upclocked
	5541	 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
5542 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305543 *
5544 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5545 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305546void intel_edp_drrs_flush(struct drm_device *dev,
5547 unsigned frontbuffer_bits)
5548{
5549 struct drm_i915_private *dev_priv = dev->dev_private;
5550 struct drm_crtc *crtc;
5551 enum pipe pipe;
5552
Daniel Vetter9da7d692015-04-09 16:44:15 +02005553 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305554 return;
5555
Daniel Vetter88f933a2015-04-09 16:44:16 +02005556 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305557
Vandana Kannana93fad02015-01-10 02:25:59 +05305558 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005559 if (!dev_priv->drrs.dp) {
5560 mutex_unlock(&dev_priv->drrs.mutex);
5561 return;
5562 }
5563
Vandana Kannana93fad02015-01-10 02:25:59 +05305564 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5565 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005566
5567 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305568 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5569
Ramalingam C0ddfd202015-06-15 20:50:05 +05305570 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005571 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305572 intel_dp_set_drrs_state(dev_priv->dev,
5573 dev_priv->drrs.dp->attached_connector->panel.
5574 fixed_mode->vrefresh);
5575
5576 /*
5577 * flush also means no more activity hence schedule downclock, if all
5578 * other fbs are quiescent too
5579 */
5580 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305581 schedule_delayed_work(&dev_priv->drrs.work,
5582 msecs_to_jiffies(1000));
5583 mutex_unlock(&dev_priv->drrs.mutex);
5584}
5585
Vandana Kannanb33a2812015-02-13 15:33:03 +05305586/**
5587 * DOC: Display Refresh Rate Switching (DRRS)
5588 *
5589 * Display Refresh Rate Switching (DRRS) is a power conservation feature
	5590	 * which enables switching between low and high refresh rates,
5591 * dynamically, based on the usage scenario. This feature is applicable
5592 * for internal panels.
5593 *
5594 * Indication that the panel supports DRRS is given by the panel EDID, which
5595 * would list multiple refresh rates for one resolution.
5596 *
5597 * DRRS is of 2 types - static and seamless.
5598 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
	5599	 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5600 * Seamless DRRS involves changing RR without any visual effect to the user
5601 * and can be used during normal system usage. This is done by programming
5602 * certain registers.
5603 *
5604 * Support for static/seamless DRRS may be indicated in the VBT based on
5605 * inputs from the panel spec.
5606 *
5607 * DRRS saves power by switching to low RR based on usage scenarios.
5608 *
5609 * eDP DRRS:-
5610 * The implementation is based on frontbuffer tracking implementation.
5611 * When there is a disturbance on the screen triggered by user activity or a
5612 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5613 * When there is no movement on screen, after a timeout of 1 second, a switch
5614 * to low RR is made.
5615 * For integration with frontbuffer tracking code,
5616 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5617 *
5618 * DRRS can be further extended to support other internal panels and also
5619 * the scenario of video playback wherein RR is set based on the rate
5620 * requested by userspace.
5621 */
5622
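/*
 * Illustrative sketch (not driver code): a frontbuffer tracking caller is
 * expected to bracket rendering roughly like this, where frontbuffer_bits
 * are the plane bits of the framebuffer being drawn to:
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... render to the frontbuffer ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * The invalidate side upclocks to the high RR right away if needed; the
 * flush side also upclocks and re-arms the one second idleness work that
 * downclocks again once no planes remain busy.
 */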
5623/**
5624 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5625 * @intel_connector: eDP connector
5626 * @fixed_mode: preferred mode of panel
5627 *
5628 * This function is called only once at driver load to initialize basic
5629 * DRRS stuff.
5630 *
5631 * Returns:
5632 * Downclock mode if panel supports it, else return NULL.
5633 * DRRS support is determined by the presence of downclock mode (apart
5634 * from VBT setting).
5635 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305636static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305637intel_dp_drrs_init(struct intel_connector *intel_connector,
5638 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305639{
5640 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305641 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305642 struct drm_i915_private *dev_priv = dev->dev_private;
5643 struct drm_display_mode *downclock_mode = NULL;
5644
Daniel Vetter9da7d692015-04-09 16:44:15 +02005645 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5646 mutex_init(&dev_priv->drrs.mutex);
5647
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305648 if (INTEL_INFO(dev)->gen <= 6) {
5649 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5650 return NULL;
5651 }
5652
5653 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005654 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305655 return NULL;
5656 }
5657
5658 downclock_mode = intel_find_panel_downclock
5659 (dev, fixed_mode, connector);
5660
5661 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305662 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305663 return NULL;
5664 }
5665
Vandana Kannan96178ee2015-01-10 02:25:56 +05305666 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305667
Vandana Kannan96178ee2015-01-10 02:25:56 +05305668 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005669 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305670 return downclock_mode;
5671}
5672
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005673static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005674 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005675{
5676 struct drm_connector *connector = &intel_connector->base;
5677 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005678 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5679 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005680 struct drm_i915_private *dev_priv = dev->dev_private;
5681 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305682 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005683 bool has_dpcd;
5684 struct drm_display_mode *scan;
5685 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005686 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005687
5688 if (!is_edp(intel_dp))
5689 return true;
5690
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005691 pps_lock(intel_dp);
5692 intel_edp_panel_vdd_sanitize(intel_dp);
5693 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005694
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005695 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005696 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005697
5698 if (has_dpcd) {
5699 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5700 dev_priv->no_aux_handshake =
5701 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5702 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5703 } else {
5704 /* if this fails, presume the device is a ghost */
5705 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005706 return false;
5707 }
5708
5709 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005710 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005711 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005712 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005713
Daniel Vetter060c8772014-03-21 23:22:35 +01005714 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005715 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005716 if (edid) {
5717 if (drm_add_edid_modes(connector, edid)) {
5718 drm_mode_connector_update_edid_property(connector,
5719 edid);
5720 drm_edid_to_eld(connector, edid);
5721 } else {
5722 kfree(edid);
5723 edid = ERR_PTR(-EINVAL);
5724 }
5725 } else {
5726 edid = ERR_PTR(-ENOENT);
5727 }
5728 intel_connector->edid = edid;
5729
5730 /* prefer fixed mode from EDID if available */
5731 list_for_each_entry(scan, &connector->probed_modes, head) {
5732 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5733 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305734 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305735 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005736 break;
5737 }
5738 }
5739
5740 /* fallback to VBT if available for eDP */
5741 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5742 fixed_mode = drm_mode_duplicate(dev,
5743 dev_priv->vbt.lfp_lvds_vbt_mode);
5744 if (fixed_mode)
5745 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5746 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005747 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005748
Clint Taylor01527b32014-07-07 13:01:46 -07005749 if (IS_VALLEYVIEW(dev)) {
5750 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5751 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005752
5753 /*
5754 * Figure out the current pipe for the initial backlight setup.
5755 * If the current pipe isn't valid, try the PPS pipe, and if that
5756 * fails just assume pipe A.
5757 */
5758 if (IS_CHERRYVIEW(dev))
5759 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5760 else
5761 pipe = PORT_TO_PIPE(intel_dp->DP);
5762
5763 if (pipe != PIPE_A && pipe != PIPE_B)
5764 pipe = intel_dp->pps_pipe;
5765
5766 if (pipe != PIPE_A && pipe != PIPE_B)
5767 pipe = PIPE_A;
5768
5769 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5770 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005771 }
5772
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305773 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula5507fae2015-09-14 14:03:48 +03005774 intel_connector->panel.backlight.power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005775 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005776
5777 return true;
5778}
5779
Paulo Zanoni16c25532013-06-12 17:27:25 -03005780bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005781intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5782 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005783{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005784 struct drm_connector *connector = &intel_connector->base;
5785 struct intel_dp *intel_dp = &intel_dig_port->dp;
5786 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5787 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005788 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005789 enum port port = intel_dig_port->port;
Ville Syrjäläa121f4e2015-11-11 20:34:11 +02005790 int type, ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005791
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005792 intel_dp->pps_pipe = INVALID_PIPE;
5793
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005794 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005795 if (INTEL_INFO(dev)->gen >= 9)
5796 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5797 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005798 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5799 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5800 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5801 else if (HAS_PCH_SPLIT(dev))
5802 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5803 else
5804 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5805
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005806 if (INTEL_INFO(dev)->gen >= 9)
5807 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5808 else
5809 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005810
Ander Conselvan de Oliveiraad642172015-10-23 13:01:49 +03005811 if (HAS_DDI(dev))
5812 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5813
Daniel Vetter07679352012-09-06 22:15:42 +02005814 /* Preserve the current hw state. */
5815 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005816 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005817
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005818 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305819 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005820 else
5821 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005822
Imre Deakf7d24902013-05-08 13:14:05 +03005823 /*
5824 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5825 * for DP the encoder type can be set by the caller to
5826 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5827 */
5828 if (type == DRM_MODE_CONNECTOR_eDP)
5829 intel_encoder->type = INTEL_OUTPUT_EDP;
5830
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005831 /* eDP only on port B and/or C on vlv/chv */
5832 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5833 port != PORT_B && port != PORT_C))
5834 return false;
5835
Imre Deake7281ea2013-05-08 13:14:08 +03005836 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5837 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5838 port_name(port));
5839
Adam Jacksonb3295302010-07-16 14:46:28 -04005840 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005841 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5842
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005843 connector->interlace_allowed = true;
5844 connector->doublescan_allowed = 0;
Ma Lingf8aed702009-08-24 13:50:24 +08005845
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
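		/*
		 * Early BXT steppings (up to A1) use the port A HPD pin
		 * for port B, hence the override below.
		 */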
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

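	/*
	 * eDP needs its panel power sequencer state set up before any AUX
	 * traffic; on VLV/CHV the per-pipe PPS instance currently driving
	 * this port has to be located first.
	 */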
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

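	/*
	 * intel_edp_init_connector() probes the fixed eDP panel (EDID over
	 * AUX, fixed mode); if it fails, unwind the AUX and MST setup done
	 * above.
	 */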
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

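	/*
	 * Wire up the modeset hooks. CHV and VLV need their own PHY
	 * pre/post-enable sequences; everything else goes through the
	 * g4x/ilk paths.
	 */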
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

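	/*
	 * Restrict which pipes may drive this encoder: on CHV port D is
	 * tied to pipe C and ports B/C to pipes A/B, while other platforms
	 * allow any pipe. DP encoders are never cloneable.
	 */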
	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}

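/*
 * Walk all digital ports and suspend any active MST topology manager so
 * that no sideband work is outstanding while the device is suspended.
 */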
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

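/*
 * Resume counterpart: bring the MST topology managers back up after
 * suspend. If a manager cannot be resumed (e.g. the branch device went
 * away), fall back to intel_dp_check_mst_status() so the link gets
 * re-evaluated.
 */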
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port)
			continue;
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0) {
				intel_dp_check_mst_status(&intel_dig_port->dp);
			}
		}
	}
}