/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows additional link rates.
 * Only the fixed link rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider for m2 to be programmed.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
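	 * e.g. the 162000 entry below packs m2 as (32 << 22) | 1677722 == 0x819999a.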
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

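	/* DDI port A only has 4 lanes when the DDI_A_4_LANES strap is set;
	 * otherwise it is limited to 2 lanes. */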
	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
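	/* 8b/10b channel coding: only 8 out of every 10 link bits carry data */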
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
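	/* Check the mode against the lowest bpp it could be driven at (6 bpc == 18 bpp) */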
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

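/* Pack up to four bytes MSB-first into one AUX channel data word,
 * e.g. { 0x12, 0x34, 0x56, 0x78 } becomes 0x12345678. */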
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

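/* Unpack an AUX channel data word back into bytes, mirroring intel_dp_pack_aux() */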
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
340 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, to guarantee the T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

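	/* Completion test: re-read the AUX control register and check that the
	 * SEND_BUSY bit has cleared. */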
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and should run at 2 MHz.
	 * So take the hrawclk value and divide it by 2.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

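/*
 * An AUX request header is the request type plus a 20-bit address packed into
 * three bytes, followed by one length byte holding (msg->size - 1).
 */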
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

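/* eDP 1.4+ sinks advertise an explicit table of supported link rates; older
 * sinks only report a maximum link bandwidth, so derive the number of usable
 * default rates from that instead. */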
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301240 if (!intel_dp_source_supports_hbr2(dev))
1241 size--;
1242
1243 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301244}
1245
Daniel Vetter0e503382014-07-04 11:26:04 -03001246static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001247intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001248 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001249{
1250 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001251 const struct dp_link_dpll *divisor = NULL;
1252 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001253
1254 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001255 divisor = gen4_dpll;
1256 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001257 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001258 divisor = pch_dpll;
1259 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001260 } else if (IS_CHERRYVIEW(dev)) {
1261 divisor = chv_dpll;
1262 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001263 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001264 divisor = vlv_dpll;
1265 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001266 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001267
1268 if (divisor && count) {
1269 for (i = 0; i < count; i++) {
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001270 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001271 pipe_config->dpll = divisor[i].dpll;
1272 pipe_config->clock_set = true;
1273 break;
1274 }
1275 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001276 }
1277}
1278
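/*
 * Plain sorted-merge intersection; both rate arrays are assumed to be sorted
 * in ascending order, and the result is capped at DP_MAX_SUPPORTED_RATES.
 */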
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001279static int intersect_rates(const int *source_rates, int source_len,
1280 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001281 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301282{
1283 int i = 0, j = 0, k = 0;
1284
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301285 while (i < source_len && j < sink_len) {
1286 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001287 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1288 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001289 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301290 ++k;
1291 ++i;
1292 ++j;
1293 } else if (source_rates[i] < sink_rates[j]) {
1294 ++i;
1295 } else {
1296 ++j;
1297 }
1298 }
1299 return k;
1300}
1301
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001302static int intel_dp_common_rates(struct intel_dp *intel_dp,
1303 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001304{
1305 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1306 const int *source_rates, *sink_rates;
1307 int source_len, sink_len;
1308
1309 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1310 source_len = intel_dp_source_rates(dev, &source_rates);
1311
1312 return intersect_rates(source_rates, source_len,
1313 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001314 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001315}
1316
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001317static void snprintf_int_array(char *str, size_t len,
1318 const int *array, int nelem)
1319{
1320 int i;
1321
1322 str[0] = '\0';
1323
1324 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001325 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001326 if (r >= len)
1327 return;
1328 str += r;
1329 len -= r;
1330 }
1331}
1332
1333static void intel_dp_print_rates(struct intel_dp *intel_dp)
1334{
1335 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1336 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001337 int source_len, sink_len, common_len;
1338 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001339 char str[128]; /* FIXME: too big for stack? */
1340
1341 if ((drm_debug & DRM_UT_KMS) == 0)
1342 return;
1343
1344 source_len = intel_dp_source_rates(dev, &source_rates);
1345 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1346 DRM_DEBUG_KMS("source rates: %s\n", str);
1347
1348 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1349 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1350 DRM_DEBUG_KMS("sink rates: %s\n", str);
1351
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001352 common_len = intel_dp_common_rates(intel_dp, common_rates);
1353 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1354 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001355}
1356
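/*
 * Returns the index of 'find' in 'rates', or DP_MAX_SUPPORTED_RATES when it
 * is absent. With find == 0 this doubles as a length count for the
 * zero-initialized rate arrays used by intel_dp_max_link_rate().
 */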
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001357static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301358{
1359 int i = 0;
1360
1361 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1362 if (find == rates[i])
1363 break;
1364
1365 return i;
1366}
1367
Ville Syrjälä50fec212015-03-12 17:10:34 +02001368int
1369intel_dp_max_link_rate(struct intel_dp *intel_dp)
1370{
1371 int rates[DP_MAX_SUPPORTED_RATES] = {};
1372 int len;
1373
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001374 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001375 if (WARN_ON(len <= 0))
1376 return 162000;
1377
1378 return rates[rate_to_index(0, rates) - 1];
1379}
1380
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001381int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1382{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001383 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001384}
1385
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001386bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001387intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001388 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001389{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001390 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001391 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001392 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001393 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001394 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001395 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001396 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001397 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001398 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001399 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001400	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001401 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301402 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001403 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001404 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001405 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1406 int common_len;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301407
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001408 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301409
1410 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001411 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301412
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001413 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001414
Imre Deakbc7d38a2013-05-16 14:40:36 +03001415 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001416 pipe_config->has_pch_encoder = true;
1417
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001418 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001419 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001420 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001421
Jani Nikuladd06f902012-10-19 14:51:50 +03001422 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1423 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1424 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001425
1426 if (INTEL_INFO(dev)->gen >= 9) {
1427 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001428 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001429 if (ret)
1430 return ret;
1431 }
1432
Jesse Barnes2dd24552013-04-25 12:55:01 -07001433 if (!HAS_PCH_SPLIT(dev))
1434 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1435 intel_connector->panel.fitting_mode);
1436 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001437 intel_pch_panel_fitting(intel_crtc, pipe_config,
1438 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001439 }
1440
Daniel Vettercb1793c2012-06-04 18:39:21 +02001441 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001442 return false;
1443
Daniel Vetter083f9562012-04-20 20:23:49 +02001444 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301445 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001446 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001447 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001448
Daniel Vetter36008362013-03-27 00:44:59 +01001449 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1450 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001451 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001452 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301453
 1454		/* Get bpp from the VBT only for panels that don't have bpp in the EDID */
1455 if (intel_connector->base.display_info.bpc == 0 &&
1456 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001457 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1458 dev_priv->vbt.edp_bpp);
1459 bpp = dev_priv->vbt.edp_bpp;
1460 }
1461
Jani Nikula344c5bb2014-09-09 11:25:13 +03001462 /*
1463 * Use the maximum clock and number of lanes the eDP panel
 1464		 * advertises being capable of. The panels are generally
1465 * designed to support only a single clock and lane
1466 * configuration, and typically these values correspond to the
1467 * native resolution of the panel.
1468 */
1469 min_lane_count = max_lane_count;
1470 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001471 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001472
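	/*
	 * First-fit search: prefer the highest bpp, and within each bpp the
	 * lowest link clock (and, at that clock, the fewest lanes) that still
	 * carries the mode.
	 */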
Daniel Vetter36008362013-03-27 00:44:59 +01001473 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001474 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1475 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001476
Dave Airliec6930992014-07-14 11:04:39 +10001477 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301478 for (lane_count = min_lane_count;
1479 lane_count <= max_lane_count;
1480 lane_count <<= 1) {
1481
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001482 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001483 link_avail = intel_dp_max_data_rate(link_clock,
1484 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001485
Daniel Vetter36008362013-03-27 00:44:59 +01001486 if (mode_rate <= link_avail) {
1487 goto found;
1488 }
1489 }
1490 }
1491 }
1492
1493 return false;
1494
1495found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001496 if (intel_dp->color_range_auto) {
1497 /*
1498 * See:
1499 * CEA-861-E - 5.1 Default Encoding Parameters
1500 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1501 */
Thierry Reding18316c82012-12-20 15:41:44 +01001502 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001503 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1504 else
1505 intel_dp->color_range = 0;
1506 }
1507
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001508 if (intel_dp->color_range)
Daniel Vetter50f3b012013-03-27 00:44:56 +01001509 pipe_config->limited_color_range = true;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001510
Daniel Vetter36008362013-03-27 00:44:59 +01001511 intel_dp->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301512
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001513 if (intel_dp->num_sink_rates) {
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001514 intel_dp->link_bw = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301515 intel_dp->rate_select =
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001516 intel_dp_rate_select(intel_dp, common_rates[clock]);
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001517 } else {
1518 intel_dp->link_bw =
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001519 drm_dp_link_rate_to_bw_code(common_rates[clock]);
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001520 intel_dp->rate_select = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301521 }
1522
Daniel Vetter657445f2013-05-04 10:09:18 +02001523 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001524 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001525
Daniel Vetter36008362013-03-27 00:44:59 +01001526 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1527 intel_dp->link_bw, intel_dp->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001528 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001529 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1530 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001531
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001532 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001533 adjusted_mode->crtc_clock,
1534 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001535 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001536
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301537 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301538 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001539 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301540 intel_link_compute_m_n(bpp, lane_count,
1541 intel_connector->panel.downclock_mode->clock,
1542 pipe_config->port_clock,
1543 &pipe_config->dp_m2_n2);
1544 }
1545
Damien Lespiau5416d872014-11-14 17:24:33 +00001546 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001547 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301548 else if (IS_BROXTON(dev))
1549 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001550 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001551 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001552 else
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001553 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001554
Daniel Vetter36008362013-03-27 00:44:59 +01001555 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001556}
1557
Daniel Vetter7c62a162013-06-01 17:16:20 +02001558static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001559{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001560 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1561 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1562 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001563 struct drm_i915_private *dev_priv = dev->dev_private;
1564 u32 dpa_ctl;
1565
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001566 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1567 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001568 dpa_ctl = I915_READ(DP_A);
1569 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1570
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001571 if (crtc->config->port_clock == 162000) {
Daniel Vetter1ce17032012-11-29 15:59:32 +01001572		/* For a long time we've carried around an ILK-DevA w/a for the
1573 * 160MHz clock. If we're really unlucky, it's still required.
1574 */
1575 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
Daniel Vetterea9b6002012-11-29 15:59:31 +01001576 dpa_ctl |= DP_PLL_FREQ_160MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001577 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001578 } else {
1579 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001580 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001581 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001582
Daniel Vetterea9b6002012-11-29 15:59:31 +01001583 I915_WRITE(DP_A, dpa_ctl);
1584
1585 POSTING_READ(DP_A);
1586 udelay(500);
1587}
1588
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001589static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001590{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001591 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001592 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001593 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001594 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001595 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001596 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001597
Keith Packard417e8222011-11-01 19:54:11 -07001598 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001599 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001600 *
1601 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001602 * SNB CPU
1603 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001604 * CPT PCH
1605 *
1606 * IBX PCH and CPU are the same for almost everything,
1607 * except that the CPU DP PLL is configured in this
1608 * register
1609 *
1610 * CPT PCH is quite different, having many bits moved
1611 * to the TRANS_DP_CTL register instead. That
1612 * configuration happens (oddly) in ironlake_pch_enable
1613 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001614
Keith Packard417e8222011-11-01 19:54:11 -07001615 /* Preserve the BIOS-computed detected bit. This is
1616 * supposed to be read-only.
1617 */
1618 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001619
Keith Packard417e8222011-11-01 19:54:11 -07001620 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001621 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Daniel Vetter17aa6be2013-04-30 14:01:40 +02001622 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001623
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001624 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001625 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001626
Keith Packard417e8222011-11-01 19:54:11 -07001627 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001628
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001629 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001630 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1631 intel_dp->DP |= DP_SYNC_HS_HIGH;
1632 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1633 intel_dp->DP |= DP_SYNC_VS_HIGH;
1634 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1635
Jani Nikula6aba5b62013-10-04 15:08:10 +03001636 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001637 intel_dp->DP |= DP_ENHANCED_FRAMING;
1638
Daniel Vetter7c62a162013-06-01 17:16:20 +02001639 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001640 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001641 u32 trans_dp;
1642
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001643 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001644
1645 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1646 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1647 trans_dp |= TRANS_DP_ENH_FRAMING;
1648 else
1649 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1650 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001651 } else {
Jesse Barnesb2634012013-03-28 09:55:40 -07001652 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001653 intel_dp->DP |= intel_dp->color_range;
Keith Packard417e8222011-11-01 19:54:11 -07001654
1655 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1656 intel_dp->DP |= DP_SYNC_HS_HIGH;
1657 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1658 intel_dp->DP |= DP_SYNC_VS_HIGH;
1659 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1660
Jani Nikula6aba5b62013-10-04 15:08:10 +03001661 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001662 intel_dp->DP |= DP_ENHANCED_FRAMING;
1663
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001664 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001665 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001666 else if (crtc->pipe == PIPE_B)
1667 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001668 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001669}
1670
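/*
 * Panel power sequencer status masks/values, polled from PP_STATUS by
 * wait_panel_status() until the panel reaches the requested state.
 */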
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001671#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1672#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001673
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001674#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1675#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001676
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001677#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1678#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001679
Daniel Vetter4be73782014-01-17 14:39:48 +01001680static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001681 u32 mask,
1682 u32 value)
1683{
Paulo Zanoni30add222012-10-26 19:05:45 -02001684 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001685 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001686 u32 pp_stat_reg, pp_ctrl_reg;
1687
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
Jani Nikulabf13e812013-09-06 07:40:05 +03001690 pp_stat_reg = _pp_stat_reg(intel_dp);
1691 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001692
1693 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001694 mask, value,
1695 I915_READ(pp_stat_reg),
1696 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001697
Jesse Barnes453c5422013-03-28 09:55:41 -07001698 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001699 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001700 I915_READ(pp_stat_reg),
1701 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001702 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001703
1704 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001705}
1706
Daniel Vetter4be73782014-01-17 14:39:48 +01001707static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001708{
1709 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001710 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001711}
1712
Daniel Vetter4be73782014-01-17 14:39:48 +01001713static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001714{
Keith Packardbd943152011-09-18 23:09:52 -07001715 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001716 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001717}
Keith Packardbd943152011-09-18 23:09:52 -07001718
Daniel Vetter4be73782014-01-17 14:39:48 +01001719static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001720{
1721 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001722
 1723	/* When we disable the VDD override bit last, we have to do the manual
1724 * wait. */
1725 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1726 intel_dp->panel_power_cycle_delay);
1727
Daniel Vetter4be73782014-01-17 14:39:48 +01001728 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001729}
Keith Packardbd943152011-09-18 23:09:52 -07001730
Daniel Vetter4be73782014-01-17 14:39:48 +01001731static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001732{
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1734 intel_dp->backlight_on_delay);
1735}
1736
Daniel Vetter4be73782014-01-17 14:39:48 +01001737static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001738{
1739 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1740 intel_dp->backlight_off_delay);
1741}
Keith Packard99ea7122011-11-01 19:57:50 -07001742
Keith Packard832dd3c2011-11-01 19:34:06 -07001743/* Read the current pp_control value, unlocking the register if it
1744 * is locked
1745 */
1746
Jesse Barnes453c5422013-03-28 09:55:41 -07001747static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001748{
Jesse Barnes453c5422013-03-28 09:55:41 -07001749 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001752
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001753 lockdep_assert_held(&dev_priv->pps_mutex);
1754
Jani Nikulabf13e812013-09-06 07:40:05 +03001755 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301756 if (!IS_BROXTON(dev)) {
1757 control &= ~PANEL_UNLOCK_MASK;
1758 control |= PANEL_UNLOCK_REGS;
1759 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001760 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001761}
1762
Ville Syrjälä951468f2014-09-04 14:55:31 +03001763/*
1764 * Must be paired with edp_panel_vdd_off().
1765 * Must hold pps_mutex around the whole on/off sequence.
1766 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1767 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001768static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001769{
Paulo Zanoni30add222012-10-26 19:05:45 -02001770 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1772 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001773 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001774 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001775 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001776 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001777 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001778
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001779 lockdep_assert_held(&dev_priv->pps_mutex);
1780
Keith Packard97af61f572011-09-28 16:23:51 -07001781 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001782 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001783
Egbert Eich2c623c12014-11-25 12:54:57 +01001784 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001785 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001786
Daniel Vetter4be73782014-01-17 14:39:48 +01001787 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001788 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001789
Imre Deak4e6e1a52014-03-27 17:45:11 +02001790 power_domain = intel_display_port_power_domain(intel_encoder);
1791 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001792
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001793 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1794 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001795
Daniel Vetter4be73782014-01-17 14:39:48 +01001796 if (!edp_have_panel_power(intel_dp))
1797 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001798
Jesse Barnes453c5422013-03-28 09:55:41 -07001799 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001800 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001801
Jani Nikulabf13e812013-09-06 07:40:05 +03001802 pp_stat_reg = _pp_stat_reg(intel_dp);
1803 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001804
1805 I915_WRITE(pp_ctrl_reg, pp);
1806 POSTING_READ(pp_ctrl_reg);
1807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1808 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001809 /*
1810 * If the panel wasn't on, delay before accessing aux channel
1811 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001812 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001813 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1814 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001815 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001816 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001817
1818 return need_to_disable;
1819}
1820
Ville Syrjälä951468f2014-09-04 14:55:31 +03001821/*
1822 * Must be paired with intel_edp_panel_vdd_off() or
1823 * intel_edp_panel_off().
1824 * Nested calls to these functions are not allowed since
1825 * we drop the lock. Caller must use some higher level
1826 * locking to prevent nested calls from other threads.
1827 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001828void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001829{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001830 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001831
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001832 if (!is_edp(intel_dp))
1833 return;
1834
Ville Syrjälä773538e82014-09-04 14:54:56 +03001835 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001836 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001837 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001838
Rob Clarke2c719b2014-12-15 13:56:32 -05001839 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001840 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001841}
1842
Daniel Vetter4be73782014-01-17 14:39:48 +01001843static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001844{
Paulo Zanoni30add222012-10-26 19:05:45 -02001845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001846 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001847 struct intel_digital_port *intel_dig_port =
1848 dp_to_dig_port(intel_dp);
1849 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1850 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001851 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001852 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001853
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001854 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001855
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001856 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001857
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001858 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001859 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001860
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001861 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1862 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001863
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001864 pp = ironlake_get_pp_control(intel_dp);
1865 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001866
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001867 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1868 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001869
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001872
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001873 /* Make sure sequencer is idle before allowing subsequent activity */
1874 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1875 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001876
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001877 if ((pp & POWER_TARGET_ON) == 0)
1878 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001879
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001880 power_domain = intel_display_port_power_domain(intel_encoder);
1881 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001882}
1883
Daniel Vetter4be73782014-01-17 14:39:48 +01001884static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001885{
1886 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1887 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001888
Ville Syrjälä773538e82014-09-04 14:54:56 +03001889 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001890 if (!intel_dp->want_panel_vdd)
1891 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001892 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001893}
1894
Imre Deakaba86892014-07-30 15:57:31 +03001895static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1896{
1897 unsigned long delay;
1898
1899 /*
1900 * Queue the timer to fire a long time from now (relative to the power
1901 * down delay) to keep the panel power up across a sequence of
1902 * operations.
1903 */
1904 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1905 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1906}
1907
Ville Syrjälä951468f2014-09-04 14:55:31 +03001908/*
1909 * Must be paired with edp_panel_vdd_on().
1910 * Must hold pps_mutex around the whole on/off sequence.
1911 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1912 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001913static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001914{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001915 struct drm_i915_private *dev_priv =
1916 intel_dp_to_dev(intel_dp)->dev_private;
1917
1918 lockdep_assert_held(&dev_priv->pps_mutex);
1919
Keith Packard97af61f572011-09-28 16:23:51 -07001920 if (!is_edp(intel_dp))
1921 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001922
Rob Clarke2c719b2014-12-15 13:56:32 -05001923 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001924 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001925
Keith Packardbd943152011-09-18 23:09:52 -07001926 intel_dp->want_panel_vdd = false;
1927
Imre Deakaba86892014-07-30 15:57:31 +03001928 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001929 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001930 else
1931 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001932}
1933
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001934static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001935{
Paulo Zanoni30add222012-10-26 19:05:45 -02001936 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001937 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001938 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001939 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001940
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001941 lockdep_assert_held(&dev_priv->pps_mutex);
1942
Keith Packard97af61f572011-09-28 16:23:51 -07001943 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001944 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001945
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001946 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001948
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001949 if (WARN(edp_have_panel_power(intel_dp),
1950 "eDP port %c panel power already on\n",
1951 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001952 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001953
Daniel Vetter4be73782014-01-17 14:39:48 +01001954 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001955
Jani Nikulabf13e812013-09-06 07:40:05 +03001956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001957 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001958 if (IS_GEN5(dev)) {
1959 /* ILK workaround: disable reset around power sequence */
1960 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001961 I915_WRITE(pp_ctrl_reg, pp);
1962 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001963 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001964
Keith Packard1c0ae802011-09-19 13:59:29 -07001965 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001966 if (!IS_GEN5(dev))
1967 pp |= PANEL_POWER_RESET;
1968
Jesse Barnes453c5422013-03-28 09:55:41 -07001969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001971
Daniel Vetter4be73782014-01-17 14:39:48 +01001972 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001973 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001974
Keith Packard05ce1a42011-09-29 16:33:01 -07001975 if (IS_GEN5(dev)) {
1976 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001979 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001980}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001981
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001982void intel_edp_panel_on(struct intel_dp *intel_dp)
1983{
1984 if (!is_edp(intel_dp))
1985 return;
1986
1987 pps_lock(intel_dp);
1988 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001989 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001990}
1991
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001992
1993static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001994{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001995 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1996 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001997 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001998 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001999 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002000 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002001 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002002
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002003 lockdep_assert_held(&dev_priv->pps_mutex);
2004
Keith Packard97af61f572011-09-28 16:23:51 -07002005 if (!is_edp(intel_dp))
2006 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002007
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002008 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2009 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002010
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002011 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2012 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002013
Jesse Barnes453c5422013-03-28 09:55:41 -07002014 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002015	/* We need to switch off panel power _and_ force VDD, because otherwise some
2016 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002017 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2018 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002019
Jani Nikulabf13e812013-09-06 07:40:05 +03002020 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002021
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002022 intel_dp->want_panel_vdd = false;
2023
Jesse Barnes453c5422013-03-28 09:55:41 -07002024 I915_WRITE(pp_ctrl_reg, pp);
2025 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002026
Paulo Zanonidce56b32013-12-19 14:29:40 -02002027 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002028 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002029
2030 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002031 power_domain = intel_display_port_power_domain(intel_encoder);
2032 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002033}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002034
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002035void intel_edp_panel_off(struct intel_dp *intel_dp)
2036{
2037 if (!is_edp(intel_dp))
2038 return;
2039
2040 pps_lock(intel_dp);
2041 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002042 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002043}
2044
Jani Nikula1250d102014-08-12 17:11:39 +03002045/* Enable backlight in the panel power control. */
2046static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002047{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002048 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2049 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002050 struct drm_i915_private *dev_priv = dev->dev_private;
2051 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002052 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002053
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002054 /*
2055 * If we enable the backlight right away following a panel power
2056 * on, we may see slight flicker as the panel syncs with the eDP
2057 * link. So delay a bit to make sure the image is solid before
2058 * allowing it to appear.
2059 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002060 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002061
Ville Syrjälä773538e82014-09-04 14:54:56 +03002062 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002063
Jesse Barnes453c5422013-03-28 09:55:41 -07002064 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002065 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002066
Jani Nikulabf13e812013-09-06 07:40:05 +03002067 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002068
2069 I915_WRITE(pp_ctrl_reg, pp);
2070 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002071
Ville Syrjälä773538e82014-09-04 14:54:56 +03002072 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002073}
2074
Jani Nikula1250d102014-08-12 17:11:39 +03002075/* Enable backlight PWM and backlight PP control. */
2076void intel_edp_backlight_on(struct intel_dp *intel_dp)
2077{
2078 if (!is_edp(intel_dp))
2079 return;
2080
2081 DRM_DEBUG_KMS("\n");
2082
2083 intel_panel_enable_backlight(intel_dp->attached_connector);
2084 _intel_edp_backlight_on(intel_dp);
2085}
2086
2087/* Disable backlight in the panel power control. */
2088static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002089{
Paulo Zanoni30add222012-10-26 19:05:45 -02002090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002093 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002094
Keith Packardf01eca22011-09-28 16:48:10 -07002095 if (!is_edp(intel_dp))
2096 return;
2097
Ville Syrjälä773538e82014-09-04 14:54:56 +03002098 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002099
Jesse Barnes453c5422013-03-28 09:55:41 -07002100 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002101 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002102
Jani Nikulabf13e812013-09-06 07:40:05 +03002103 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002104
2105 I915_WRITE(pp_ctrl_reg, pp);
2106 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002107
Ville Syrjälä773538e82014-09-04 14:54:56 +03002108 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002109
Paulo Zanonidce56b32013-12-19 14:29:40 -02002110 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002111 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002112}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002113
Jani Nikula1250d102014-08-12 17:11:39 +03002114/* Disable backlight PP control and backlight PWM. */
2115void intel_edp_backlight_off(struct intel_dp *intel_dp)
2116{
2117 if (!is_edp(intel_dp))
2118 return;
2119
2120 DRM_DEBUG_KMS("\n");
2121
2122 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002123 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002124}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002125
Jani Nikula73580fb72014-08-12 17:11:41 +03002126/*
2127 * Hook for controlling the panel power control backlight through the bl_power
2128 * sysfs attribute. Take care to handle multiple calls.
2129 */
2130static void intel_edp_backlight_power(struct intel_connector *connector,
2131 bool enable)
2132{
2133 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002134 bool is_enabled;
2135
Ville Syrjälä773538e82014-09-04 14:54:56 +03002136 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002137 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002138 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002139
2140 if (is_enabled == enable)
2141 return;
2142
Jani Nikula23ba9372014-08-27 14:08:43 +03002143 DRM_DEBUG_KMS("panel power control backlight %s\n",
2144 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002145
2146 if (enable)
2147 _intel_edp_backlight_on(intel_dp);
2148 else
2149 _intel_edp_backlight_off(intel_dp);
2150}
2151
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002152static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002153{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2155 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2156 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 u32 dpa_ctl;
2159
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002160 assert_pipe_disabled(dev_priv,
2161 to_intel_crtc(crtc)->pipe);
2162
Jesse Barnesd240f202010-08-13 15:43:26 -07002163 DRM_DEBUG_KMS("\n");
2164 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002165 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2166 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2167
2168 /* We don't adjust intel_dp->DP while tearing down the link, to
2169 * facilitate link retraining (e.g. after hotplug). Hence clear all
2170 * enable bits here to ensure that we don't enable too much. */
2171 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2172 intel_dp->DP |= DP_PLL_ENABLE;
2173 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002174 POSTING_READ(DP_A);
2175 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002176}
2177
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002178static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002179{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002180 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2181 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2182 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002183 struct drm_i915_private *dev_priv = dev->dev_private;
2184 u32 dpa_ctl;
2185
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002186 assert_pipe_disabled(dev_priv,
2187 to_intel_crtc(crtc)->pipe);
2188
Jesse Barnesd240f202010-08-13 15:43:26 -07002189 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002190 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2191 "dp pll off, should be on\n");
2192 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2193
2194 /* We can't rely on the value tracked for the DP register in
2195 * intel_dp->DP because link_down must not change that (otherwise link
 2196	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002197 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002198 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002199 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002200 udelay(200);
2201}
2202
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002203/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002204void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002205{
2206 int ret, i;
2207
2208 /* Should have a valid DPCD by this point */
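	/* Sinks reporting DPCD 1.0 predate DP_SET_POWER, hence the rev check. */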
2209 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2210 return;
2211
2212 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002213 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2214 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002215 } else {
2216 /*
2217 * When turning on, we need to retry for 1ms to give the sink
2218 * time to wake up.
2219 */
2220 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002223 if (ret == 1)
2224 break;
2225 msleep(1);
2226 }
2227 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002228
2229 if (ret != 1)
2230 DRM_DEBUG_KMS("failed to %s sink power state\n",
2231 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002232}
2233
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002234static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2235 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002236{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002237 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002238 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002239 struct drm_device *dev = encoder->base.dev;
2240 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002241 enum intel_display_power_domain power_domain;
2242 u32 tmp;
2243
2244 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002245 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002246 return false;
2247
2248 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002249
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002250 if (!(tmp & DP_PORT_EN))
2251 return false;
2252
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002253 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002254 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002255 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002256 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002257
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002258 for_each_pipe(dev_priv, p) {
2259 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2260 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2261 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002262 return true;
2263 }
2264 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002265
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002266 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2267 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002268 } else if (IS_CHERRYVIEW(dev)) {
2269 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2270 } else {
2271 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002272 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002273
2274 return true;
2275}
2276
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002277static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002278 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002279{
2280 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002281 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002282 struct drm_device *dev = encoder->base.dev;
2283 struct drm_i915_private *dev_priv = dev->dev_private;
2284 enum port port = dp_to_dig_port(intel_dp)->port;
2285 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002286 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002287
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002288 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002289
2290 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002291
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002292 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Xiong Zhang63000ef2013-06-28 12:59:06 +08002293 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2294 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2295 flags |= DRM_MODE_FLAG_PHSYNC;
2296 else
2297 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002298
Xiong Zhang63000ef2013-06-28 12:59:06 +08002299 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2300 flags |= DRM_MODE_FLAG_PVSYNC;
2301 else
2302 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002303 } else {
2304 if (tmp & DP_SYNC_HS_HIGH)
2305 flags |= DRM_MODE_FLAG_PHSYNC;
2306 else
2307 flags |= DRM_MODE_FLAG_NHSYNC;
2308
2309 if (tmp & DP_SYNC_VS_HIGH)
2310 flags |= DRM_MODE_FLAG_PVSYNC;
2311 else
2312 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002313 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002314
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002315 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002316
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002317 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2318 tmp & DP_COLOR_RANGE_16_235)
2319 pipe_config->limited_color_range = true;
2320
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002321 pipe_config->has_dp_encoder = true;
2322
2323 intel_dp_get_m_n(crtc, pipe_config);
2324
Ville Syrjälä18442d02013-09-13 16:00:08 +03002325 if (port == PORT_A) {
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002326 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2327 pipe_config->port_clock = 162000;
2328 else
2329 pipe_config->port_clock = 270000;
2330 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002331
2332 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2333 &pipe_config->dp_m_n);
2334
2335 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2336 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2337
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002338 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002339
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002340 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2341 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2342 /*
2343 * This is a big fat ugly hack.
2344 *
2345	 * Some machines in UEFI boot mode provide us with a VBT that has 18
2346 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2347 * unknown we fail to light up. Yet the same BIOS boots up with
2348 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2349 * max, not what it tells us to use.
2350 *
2351 * Note: This will still be broken if the eDP panel is not lit
2352 * up by the BIOS, and thus we can't get the mode at module
2353 * load.
2354 */
2355 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2356 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2357 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2358 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002359}
2360
Daniel Vettere8cb4552012-07-01 13:05:48 +02002361static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002362{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002363 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002364 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002365 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2366
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002367 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002368 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002369
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002370 if (HAS_PSR(dev) && !HAS_DDI(dev))
2371 intel_psr_disable(intel_dp);
2372
Daniel Vetter6cb49832012-05-20 17:14:50 +02002373 /* Make sure the panel is off before trying to change the mode. But also
2374 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002375 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002376 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002377 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002378 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002379
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002380 /* disable the port before the pipe on g4x */
2381 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002382 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002383}
2384
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002385static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002386{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002388 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002389
Ville Syrjälä49277c32014-03-31 18:21:26 +03002390 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002391 if (port == PORT_A)
2392 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002393}
2394
2395static void vlv_post_disable_dp(struct intel_encoder *encoder)
2396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2398
2399 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002400}
2401
Ville Syrjälä580d3812014-04-09 13:29:00 +03002402static void chv_post_disable_dp(struct intel_encoder *encoder)
2403{
2404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2405 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2406 struct drm_device *dev = encoder->base.dev;
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 struct intel_crtc *intel_crtc =
2409 to_intel_crtc(encoder->base.crtc);
2410 enum dpio_channel ch = vlv_dport_to_channel(dport);
2411 enum pipe pipe = intel_crtc->pipe;
2412 u32 val;
2413
2414 intel_dp_link_down(intel_dp);
2415
Ville Syrjäläa5805162015-05-26 20:42:30 +03002416 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002417
2418 /* Propagate soft reset to data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002420 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002421 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002422
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002423 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2424 val |= CHV_PCS_REQ_SOFTRESET_EN;
2425 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2426
2427 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä580d3812014-04-09 13:29:00 +03002428 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002429 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2430
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2432 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002434
Ville Syrjäläa5805162015-05-26 20:42:30 +03002435 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002436}
2437
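/*
 * Translate the requested DPCD training pattern (plus the optional
 * DP_LINK_SCRAMBLING_DISABLE flag) into the source-side register bits.
 * Three layouts exist: DP_TP_CTL on DDI platforms, the _CPT variants for
 * PCH ports and gen7 port A, and the legacy bits in the DP port register
 * on g4x/VLV/CHV. Training pattern 3 is only representable on the DDI
 * and CHV paths; the others fall back to pattern 2 with an error.
 * Typical usage (see intel_dp_enable_port() below):
 *
 *	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
 *				 DP_TRAINING_PATTERN_1);
 */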
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002438static void
2439_intel_dp_set_link_train(struct intel_dp *intel_dp,
2440 uint32_t *DP,
2441 uint8_t dp_train_pat)
2442{
2443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2444 struct drm_device *dev = intel_dig_port->base.base.dev;
2445 struct drm_i915_private *dev_priv = dev->dev_private;
2446 enum port port = intel_dig_port->port;
2447
2448 if (HAS_DDI(dev)) {
2449 uint32_t temp = I915_READ(DP_TP_CTL(port));
2450
2451 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2452 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2453 else
2454 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2455
2456 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2457 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2458 case DP_TRAINING_PATTERN_DISABLE:
2459 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2460
2461 break;
2462 case DP_TRAINING_PATTERN_1:
2463 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2464 break;
2465 case DP_TRAINING_PATTERN_2:
2466 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2467 break;
2468 case DP_TRAINING_PATTERN_3:
2469 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2470 break;
2471 }
2472 I915_WRITE(DP_TP_CTL(port), temp);
2473
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002474 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2475 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002476 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2477
2478 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2479 case DP_TRAINING_PATTERN_DISABLE:
2480 *DP |= DP_LINK_TRAIN_OFF_CPT;
2481 break;
2482 case DP_TRAINING_PATTERN_1:
2483 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2484 break;
2485 case DP_TRAINING_PATTERN_2:
2486 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2487 break;
2488 case DP_TRAINING_PATTERN_3:
2489 DRM_ERROR("DP training pattern 3 not supported\n");
2490 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2491 break;
2492 }
2493
2494 } else {
2495 if (IS_CHERRYVIEW(dev))
2496 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2497 else
2498 *DP &= ~DP_LINK_TRAIN_MASK;
2499
2500 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2501 case DP_TRAINING_PATTERN_DISABLE:
2502 *DP |= DP_LINK_TRAIN_OFF;
2503 break;
2504 case DP_TRAINING_PATTERN_1:
2505 *DP |= DP_LINK_TRAIN_PAT_1;
2506 break;
2507 case DP_TRAINING_PATTERN_2:
2508 *DP |= DP_LINK_TRAIN_PAT_2;
2509 break;
2510 case DP_TRAINING_PATTERN_3:
2511 if (IS_CHERRYVIEW(dev)) {
2512 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2513 } else {
2514 DRM_ERROR("DP training pattern 3 not supported\n");
2515 *DP |= DP_LINK_TRAIN_PAT_2;
2516 }
2517 break;
2518 }
2519 }
2520}
2521
2522static void intel_dp_enable_port(struct intel_dp *intel_dp)
2523{
2524 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2525 struct drm_i915_private *dev_priv = dev->dev_private;
2526
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002527 /* enable with pattern 1 (as per spec) */
2528 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2529 DP_TRAINING_PATTERN_1);
2530
2531 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2532 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002533
2534 /*
2535 * Magic for VLV/CHV. We _must_ first set up the register
2536 * without actually enabling the port, and then do another
2537 * write to enable the port. Otherwise link training will
2538 * fail when the power sequencer is freshly used for this port.
2539 */
2540 intel_dp->DP |= DP_PORT_EN;
2541
2542 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2543 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002544}
2545
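/*
 * Port power-up ordering: program the port register with training
 * pattern 1 selected (written in two steps for VLV/CHV), bring up the
 * panel power sequencer and VDD for eDP under pps_lock, wait for the PHY
 * lanes on VLV/CHV, then wake the sink, run link training and finally
 * enable audio if the pipe config asks for it.
 */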
Daniel Vettere8cb4552012-07-01 13:05:48 +02002546static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002547{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002548 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2549 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002550 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002551 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002552 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002553 unsigned int lane_mask = 0x0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002554
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002555 if (WARN_ON(dp_reg & DP_PORT_EN))
2556 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002557
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002558 pps_lock(intel_dp);
2559
2560 if (IS_VALLEYVIEW(dev))
2561 vlv_init_panel_power_sequencer(intel_dp);
2562
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002563 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002564
2565 edp_panel_vdd_on(intel_dp);
2566 edp_panel_on(intel_dp);
2567 edp_panel_vdd_off(intel_dp, true);
2568
2569 pps_unlock(intel_dp);
2570
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002571 if (IS_VALLEYVIEW(dev))
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002572 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2573 lane_mask);
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002574
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002575 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2576 intel_dp_start_link_train(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002577 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002578 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002579
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002580 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002581 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2582 pipe_name(crtc->pipe));
2583 intel_audio_codec_enable(encoder);
2584 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002585}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002586
Jani Nikulaecff4f32013-09-06 07:38:29 +03002587static void g4x_enable_dp(struct intel_encoder *encoder)
2588{
Jani Nikula828f5c62013-09-05 16:44:45 +03002589 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2590
Jani Nikulaecff4f32013-09-06 07:38:29 +03002591 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002592 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002593}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002594
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002595static void vlv_enable_dp(struct intel_encoder *encoder)
2596{
Jani Nikula828f5c62013-09-05 16:44:45 +03002597 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2598
Daniel Vetter4be73782014-01-17 14:39:48 +01002599 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002600 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002601}
2602
Jani Nikulaecff4f32013-09-06 07:38:29 +03002603static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002604{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002605 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002606 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002607
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002608 intel_dp_prepare(encoder);
2609
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002610 /* Only ilk+ has port A */
2611 if (dport->port == PORT_A) {
2612 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002613 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002614 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002615}
2616
Ville Syrjälä83b84592014-10-16 21:29:51 +03002617static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2618{
2619 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2620 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2621 enum pipe pipe = intel_dp->pps_pipe;
2622 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2623
2624 edp_panel_vdd_off_sync(intel_dp);
2625
2626 /*
2627	 * VLV seems to get confused when multiple power sequencers
2628	 * have the same port selected (even if only one has power/vdd
2629	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2630	 * CHV, on the other hand, doesn't seem to mind having the same port
2631	 * selected in multiple power sequencers, but let's always clear the
2632	 * port select when logically disconnecting a power sequencer
2633	 * from a port.
2634 */
2635 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2636 pipe_name(pipe), port_name(intel_dig_port->port));
2637 I915_WRITE(pp_on_reg, 0);
2638 POSTING_READ(pp_on_reg);
2639
2640 intel_dp->pps_pipe = INVALID_PIPE;
2641}
2642
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002643static void vlv_steal_power_sequencer(struct drm_device *dev,
2644 enum pipe pipe)
2645{
2646 struct drm_i915_private *dev_priv = dev->dev_private;
2647 struct intel_encoder *encoder;
2648
2649 lockdep_assert_held(&dev_priv->pps_mutex);
2650
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002651 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2652 return;
2653
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002654 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2655 base.head) {
2656 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002657 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002658
2659 if (encoder->type != INTEL_OUTPUT_EDP)
2660 continue;
2661
2662 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002663 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002664
2665 if (intel_dp->pps_pipe != pipe)
2666 continue;
2667
2668 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002669 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002670
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002671 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002672 "stealing pipe %c power sequencer from active eDP port %c\n",
2673 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002674
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002675 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002676 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002677 }
2678}
2679
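/*
 * VLV/CHV have a panel power sequencer per pipe rather than per port.
 * Before enabling an eDP port, claim the sequencer of the pipe the port
 * is about to use: detach whatever sequencer this port held previously,
 * steal the target pipe's sequencer from any port that currently owns
 * it, and reinitialize the PPS registers for this port.
 */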
2680static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2681{
2682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2683 struct intel_encoder *encoder = &intel_dig_port->base;
2684 struct drm_device *dev = encoder->base.dev;
2685 struct drm_i915_private *dev_priv = dev->dev_private;
2686 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002687
2688 lockdep_assert_held(&dev_priv->pps_mutex);
2689
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002690 if (!is_edp(intel_dp))
2691 return;
2692
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002693 if (intel_dp->pps_pipe == crtc->pipe)
2694 return;
2695
2696 /*
2697 * If another power sequencer was being used on this
2698	 * port previously, make sure to turn off vdd there while
2699 * we still have control of it.
2700 */
2701 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002702 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002703
2704 /*
2705 * We may be stealing the power
2706 * sequencer from another port.
2707 */
2708 vlv_steal_power_sequencer(dev, crtc->pipe);
2709
2710 /* now it's all ours */
2711 intel_dp->pps_pipe = crtc->pipe;
2712
2713 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2714 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2715
2716 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002717 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2718 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002719}
2720
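/*
 * VLV pre-enable: program the PHY PCS registers for this channel over
 * the DPIO sideband before the port is turned on. The constants written
 * here are assumed to come from the VLV PHY programming sequence; bit 21
 * is toggled depending on which pipe drives the port.
 */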
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002721static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2722{
2723 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2724 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002725 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002726 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002727 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002728 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002729 int pipe = intel_crtc->pipe;
2730 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002731
Ville Syrjäläa5805162015-05-26 20:42:30 +03002732 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002733
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002734 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002735 val = 0;
2736 if (pipe)
2737 val |= (1<<21);
2738 else
2739 val &= ~(1<<21);
2740 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002741 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2742 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2743 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002744
Ville Syrjäläa5805162015-05-26 20:42:30 +03002745 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002746
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002747 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002748}
2749
Jani Nikulaecff4f32013-09-06 07:38:29 +03002750static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002751{
2752 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2753 struct drm_device *dev = encoder->base.dev;
2754 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002755 struct intel_crtc *intel_crtc =
2756 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002757 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002758 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002759
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002760 intel_dp_prepare(encoder);
2761
Jesse Barnes89b667f2013-04-18 14:51:36 -07002762 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002763 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002764 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002765 DPIO_PCS_TX_LANE2_RESET |
2766 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002767 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002768 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2769 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2770 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2771 DPIO_PCS_CLK_SOFT_RESET);
2772
2773 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002774 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2775 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2776 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002777 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002778}
2779
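/*
 * CHV pre-enable: let the hardware manage the TX FIFO reset source, take
 * the data lanes out of soft reset, program the per-lane "upar" bits and
 * set the data lane stagger based on the link rate (larger stagger for
 * higher port clocks), all via DPIO sideband writes before the port is
 * enabled.
 */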
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002780static void chv_pre_enable_dp(struct intel_encoder *encoder)
2781{
2782 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2783 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2784 struct drm_device *dev = encoder->base.dev;
2785 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002786 struct intel_crtc *intel_crtc =
2787 to_intel_crtc(encoder->base.crtc);
2788 enum dpio_channel ch = vlv_dport_to_channel(dport);
2789 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002790 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002791 u32 val;
2792
Ville Syrjäläa5805162015-05-26 20:42:30 +03002793 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002794
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002795 /* allow hardware to manage TX FIFO reset source */
2796 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2797 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2799
2800 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2801 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2802 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2803
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002804	/* Deassert soft data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002805 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002806 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002807 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002808
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002809 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2810 val |= CHV_PCS_REQ_SOFTRESET_EN;
2811 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2812
2813 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002814 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002815 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2816
2817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2818 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2819 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002820
2821	/* Program the optimal Tx lane latency setting */
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002822 for (i = 0; i < 4; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002823 /* Set the upar bit */
2824 data = (i == 1) ? 0x0 : 0x1;
2825 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2826 data << DPIO_UPAR_SHIFT);
2827 }
2828
2829 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002830 if (intel_crtc->config->port_clock > 270000)
2831 stagger = 0x18;
2832 else if (intel_crtc->config->port_clock > 135000)
2833 stagger = 0xd;
2834 else if (intel_crtc->config->port_clock > 67500)
2835 stagger = 0x7;
2836 else if (intel_crtc->config->port_clock > 33750)
2837 stagger = 0x4;
2838 else
2839 stagger = 0x2;
2840
2841 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2842 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2843 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2844
2845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2846 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2847 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2848
2849 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2850 DPIO_LANESTAGGER_STRAP(stagger) |
2851 DPIO_LANESTAGGER_STRAP_OVRD |
2852 DPIO_TX1_STAGGER_MASK(0x1f) |
2853 DPIO_TX1_STAGGER_MULT(6) |
2854 DPIO_TX2_STAGGER_MULT(0));
2855
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2857 DPIO_LANESTAGGER_STRAP(stagger) |
2858 DPIO_LANESTAGGER_STRAP_OVRD |
2859 DPIO_TX1_STAGGER_MASK(0x1f) |
2860 DPIO_TX1_STAGGER_MULT(7) |
2861 DPIO_TX2_STAGGER_MULT(5));
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002862
Ville Syrjäläa5805162015-05-26 20:42:30 +03002863 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002864
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002865 intel_enable_dp(encoder);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002866}
2867
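/*
 * Runs before the PLL is enabled on CHV: force the left/right clock
 * buffer enables for this channel and select which clock channel the PCS
 * and common-lane blocks use, based on the pipe/port pairing, again via
 * DPIO sideband writes.
 */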
Ville Syrjälä9197c882014-04-09 13:29:05 +03002868static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2869{
2870 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2871 struct drm_device *dev = encoder->base.dev;
2872 struct drm_i915_private *dev_priv = dev->dev_private;
2873 struct intel_crtc *intel_crtc =
2874 to_intel_crtc(encoder->base.crtc);
2875 enum dpio_channel ch = vlv_dport_to_channel(dport);
2876 enum pipe pipe = intel_crtc->pipe;
2877 u32 val;
2878
Ville Syrjälä625695f2014-06-28 02:04:02 +03002879 intel_dp_prepare(encoder);
2880
Ville Syrjäläa5805162015-05-26 20:42:30 +03002881 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002882
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002883 /* program left/right clock distribution */
2884 if (pipe != PIPE_B) {
2885 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2886 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2887 if (ch == DPIO_CH0)
2888 val |= CHV_BUFLEFTENA1_FORCE;
2889 if (ch == DPIO_CH1)
2890 val |= CHV_BUFRIGHTENA1_FORCE;
2891 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2892 } else {
2893 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2894 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2895 if (ch == DPIO_CH0)
2896 val |= CHV_BUFLEFTENA2_FORCE;
2897 if (ch == DPIO_CH1)
2898 val |= CHV_BUFRIGHTENA2_FORCE;
2899 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2900 }
2901
Ville Syrjälä9197c882014-04-09 13:29:05 +03002902 /* program clock channel usage */
2903 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2904 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2905 if (pipe != PIPE_B)
2906 val &= ~CHV_PCS_USEDCLKCHANNEL;
2907 else
2908 val |= CHV_PCS_USEDCLKCHANNEL;
2909 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2910
2911 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2912 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2913 if (pipe != PIPE_B)
2914 val &= ~CHV_PCS_USEDCLKCHANNEL;
2915 else
2916 val |= CHV_PCS_USEDCLKCHANNEL;
2917 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2918
2919 /*
2920	 * This is a bit weird since generally the common lane
2921	 * (CL) block matches the pipe, but here we need to
2922	 * pick the CL based on the port.
2923 */
2924 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2925 if (pipe != PIPE_B)
2926 val &= ~CHV_CMN_USEDCLKCHANNEL;
2927 else
2928 val |= CHV_CMN_USEDCLKCHANNEL;
2929 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2930
Ville Syrjäläa5805162015-05-26 20:42:30 +03002931 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002932}
2933
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002934/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002935 * Native read with retry for link status and receiver capability reads for
2936 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02002937 *
2938 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2939 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002940 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02002941static ssize_t
2942intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2943 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002944{
Jani Nikula9d1a1032014-03-14 16:51:15 +02002945 ssize_t ret;
2946 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002947
Ville Syrjäläf6a19062014-10-16 20:46:09 +03002948 /*
2949	 * Sometimes we just get the same incorrect byte repeated
2950	 * over the entire buffer. Doing just one throw-away read
2951	 * initially seems to "solve" it.
2952 */
2953 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2954
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002955 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002956 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2957 if (ret == size)
2958 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002959 msleep(1);
2960 }
2961
Jani Nikula9d1a1032014-03-14 16:51:15 +02002962 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002963}
2964
2965/*
2966 * Fetch AUX CH registers 0x202 - 0x207 which contain
2967 * link status information
2968 */
2969static bool
Keith Packard93f62da2011-11-01 19:45:03 -07002970intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002971{
Jani Nikula9d1a1032014-03-14 16:51:15 +02002972 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2973 DP_LANE0_1_STATUS,
2974 link_status,
2975 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002976}
2977
Paulo Zanoni11002442014-06-13 18:45:41 -03002978/* These are source-specific maxima; sink requests are clamped to what the source can drive. */
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002979static uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08002980intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002981{
Paulo Zanoni30add222012-10-26 19:05:45 -02002982 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302983 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002984 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08002985
Vandana Kannan93147262014-11-18 15:45:29 +05302986 if (IS_BROXTON(dev))
2987 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2988 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05302989 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302990 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00002991 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302992 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05302993 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002994 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05302995 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002996 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05302997 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08002998 else
Sonika Jindalbd600182014-08-08 16:23:41 +05302999 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003000}
3001
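/*
 * Maximum pre-emphasis the source will produce for a given voltage
 * swing. The tables below top out at lower pre-emphasis levels as the
 * swing level grows, presumably because the combined output amplitude of
 * the PHY is limited.
 */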
3002static uint8_t
3003intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3004{
Paulo Zanoni30add222012-10-26 19:05:45 -02003005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003006 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003007
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003008 if (INTEL_INFO(dev)->gen >= 9) {
3009 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3011 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3013 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3014 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3015 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3017 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003018 default:
3019 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3020 }
3021 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003022 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3024 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3026 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3028 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003030 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303031 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003032 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003033 } else if (IS_VALLEYVIEW(dev)) {
3034 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3036 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3038 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3040 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003042 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303043 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003044 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003045 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003046 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3048 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003052 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303053 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003054 }
3055 } else {
3056 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003064 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303065 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003066 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003067 }
3068}
3069
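/*
 * Program the VLV PHY for the requested voltage swing / pre-emphasis
 * combination. The demph/preemph/uniqtranscale values are opaque
 * constants, presumably from the PHY programming tables, written over
 * the DPIO sideband under sb_lock. Returns 0 since no signal-level bits
 * live in the port register on VLV.
 */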
Daniel Vetter5829975c2015-04-16 11:36:52 +02003070static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003071{
3072 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003075 struct intel_crtc *intel_crtc =
3076 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003077 unsigned long demph_reg_value, preemph_reg_value,
3078 uniqtranscale_reg_value;
3079 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003080 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003081 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003082
3083 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303084 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003085 preemph_reg_value = 0x0004000;
3086 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003088 demph_reg_value = 0x2B405555;
3089 uniqtranscale_reg_value = 0x552AB83A;
3090 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003092 demph_reg_value = 0x2B404040;
3093 uniqtranscale_reg_value = 0x5548B83A;
3094 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003096 demph_reg_value = 0x2B245555;
3097 uniqtranscale_reg_value = 0x5560B83A;
3098 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003100 demph_reg_value = 0x2B405555;
3101 uniqtranscale_reg_value = 0x5598DA3A;
3102 break;
3103 default:
3104 return 0;
3105 }
3106 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303107 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003108 preemph_reg_value = 0x0002000;
3109 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003111 demph_reg_value = 0x2B404040;
3112 uniqtranscale_reg_value = 0x5552B83A;
3113 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003115 demph_reg_value = 0x2B404848;
3116 uniqtranscale_reg_value = 0x5580B83A;
3117 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003119 demph_reg_value = 0x2B404040;
3120 uniqtranscale_reg_value = 0x55ADDA3A;
3121 break;
3122 default:
3123 return 0;
3124 }
3125 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303126 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003127 preemph_reg_value = 0x0000000;
3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003130 demph_reg_value = 0x2B305555;
3131 uniqtranscale_reg_value = 0x5570B83A;
3132 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003134 demph_reg_value = 0x2B2B4040;
3135 uniqtranscale_reg_value = 0x55ADDA3A;
3136 break;
3137 default:
3138 return 0;
3139 }
3140 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303141 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003142 preemph_reg_value = 0x0006000;
3143 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003145 demph_reg_value = 0x1B405555;
3146 uniqtranscale_reg_value = 0x55ADDA3A;
3147 break;
3148 default:
3149 return 0;
3150 }
3151 break;
3152 default:
3153 return 0;
3154 }
3155
Ville Syrjäläa5805162015-05-26 20:42:30 +03003156 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003157 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3158 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3159 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003160 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003161 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3162 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3164 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003165 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003166
3167 return 0;
3168}
3169
Daniel Vetter5829975c2015-04-16 11:36:52 +02003170static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003171{
3172 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3173 struct drm_i915_private *dev_priv = dev->dev_private;
3174 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3175 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003176 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003177 uint8_t train_set = intel_dp->train_set[0];
3178 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003179 enum pipe pipe = intel_crtc->pipe;
3180 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003181
3182 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303183 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003184 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303185 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003186 deemph_reg_value = 128;
3187 margin_reg_value = 52;
3188 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303189 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003190 deemph_reg_value = 128;
3191 margin_reg_value = 77;
3192 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003194 deemph_reg_value = 128;
3195 margin_reg_value = 102;
3196 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303197 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003198 deemph_reg_value = 128;
3199 margin_reg_value = 154;
3200			/* FIXME: extra bits to set for the 1200 mV swing level */
3201 break;
3202 default:
3203 return 0;
3204 }
3205 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303206 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003207 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303208 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003209 deemph_reg_value = 85;
3210 margin_reg_value = 78;
3211 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003213 deemph_reg_value = 85;
3214 margin_reg_value = 116;
3215 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003217 deemph_reg_value = 85;
3218 margin_reg_value = 154;
3219 break;
3220 default:
3221 return 0;
3222 }
3223 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303224 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003225 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003227 deemph_reg_value = 64;
3228 margin_reg_value = 104;
3229 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003231 deemph_reg_value = 64;
3232 margin_reg_value = 154;
3233 break;
3234 default:
3235 return 0;
3236 }
3237 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303238 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003239 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003241 deemph_reg_value = 43;
3242 margin_reg_value = 154;
3243 break;
3244 default:
3245 return 0;
3246 }
3247 break;
3248 default:
3249 return 0;
3250 }
3251
Ville Syrjäläa5805162015-05-26 20:42:30 +03003252 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003253
3254 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003255 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3256 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003257 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3258 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003259 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3260
3261 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3262 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003263 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3264 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003265 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003266
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003267 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3268 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3269 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3271
3272 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3273 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3274 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3276
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003277 /* Program swing deemph */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003278 for (i = 0; i < 4; i++) {
3279 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3280 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3281 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3282 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3283 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003284
3285 /* Program swing margin */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003286 for (i = 0; i < 4; i++) {
3287 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003288 val &= ~DPIO_SWING_MARGIN000_MASK;
3289 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003290 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3291 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003292
3293 /* Disable unique transition scale */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003294 for (i = 0; i < 4; i++) {
3295 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3296 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3297 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3298 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003299
3300 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
Sonika Jindalbd600182014-08-08 16:23:41 +05303301 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003302 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
Sonika Jindalbd600182014-08-08 16:23:41 +05303303 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003304
3305 /*
3306 * The document said it needs to set bit 27 for ch0 and bit 26
3307 * for ch1. Might be a typo in the doc.
3308 * For now, for this unique transition scale selection, set bit
3309 * 27 for ch0 and ch1.
3310 */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003311 for (i = 0; i < 4; i++) {
3312 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3313 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3314 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3315 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003316
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003317 for (i = 0; i < 4; i++) {
3318 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3319 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3320 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3321 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3322 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003323 }
3324
3325 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003326 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3327 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3328 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3329
3330 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3331 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3332 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003333
3334 /* LRC Bypass */
3335 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3336 val |= DPIO_LRC_BYPASS;
3337 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3338
Ville Syrjäläa5805162015-05-26 20:42:30 +03003339 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003340
3341 return 0;
3342}
3343
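/*
 * Compute the next training set from the sink's per-lane adjustment
 * requests: take the highest voltage swing and pre-emphasis requested by
 * any lane, clamp them to the source maxima (setting the MAX_*_REACHED
 * flags when the clamp kicks in) and apply the result to all four lanes.
 * For example, if lane 0 asks for swing level 2 / pre-emphasis 1 and
 * lane 1 asks for swing 1 / pre-emphasis 2, the new training set becomes
 * swing 2 with pre-emphasis 2, subject to the platform limits.
 */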
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003344static void
Jani Nikula0301b3a2013-10-15 09:36:08 +03003345intel_get_adjust_train(struct intel_dp *intel_dp,
3346 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003347{
3348 uint8_t v = 0;
3349 uint8_t p = 0;
3350 int lane;
Keith Packard1a2eb462011-11-16 16:26:07 -08003351 uint8_t voltage_max;
3352 uint8_t preemph_max;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003353
Jesse Barnes33a34e42010-09-08 12:42:02 -07003354 for (lane = 0; lane < intel_dp->lane_count; lane++) {
Daniel Vetter0f037bd2012-10-18 10:15:27 +02003355 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3356 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003357
3358 if (this_v > v)
3359 v = this_v;
3360 if (this_p > p)
3361 p = this_p;
3362 }
3363
Keith Packard1a2eb462011-11-16 16:26:07 -08003364 voltage_max = intel_dp_voltage_max(intel_dp);
Keith Packard417e8222011-11-01 19:54:11 -07003365 if (v >= voltage_max)
3366 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003367
Keith Packard1a2eb462011-11-16 16:26:07 -08003368 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3369 if (p >= preemph_max)
3370 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003371
3372 for (lane = 0; lane < 4; lane++)
Jesse Barnes33a34e42010-09-08 12:42:02 -07003373 intel_dp->train_set[lane] = v | p;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003374}
3375
3376static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003377gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003378{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003379 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003380
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003381 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003383 default:
3384 signal_levels |= DP_VOLTAGE_0_4;
3385 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003387 signal_levels |= DP_VOLTAGE_0_6;
3388 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003390 signal_levels |= DP_VOLTAGE_0_8;
3391 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003393 signal_levels |= DP_VOLTAGE_1_2;
3394 break;
3395 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003396 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303397 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003398 default:
3399 signal_levels |= DP_PRE_EMPHASIS_0;
3400 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303401 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003402 signal_levels |= DP_PRE_EMPHASIS_3_5;
3403 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303404 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003405 signal_levels |= DP_PRE_EMPHASIS_6;
3406 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303407 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003408 signal_levels |= DP_PRE_EMPHASIS_9_5;
3409 break;
3410 }
3411 return signal_levels;
3412}
3413
Zhenyu Wange3421a12010-04-08 09:43:27 +08003414/* Gen6's DP voltage swing and pre-emphasis control */
3415static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003416gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003417{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003418 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3419 DP_TRAIN_PRE_EMPHASIS_MASK);
3420 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003423 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003425 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003428 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003431 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003434 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003435 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003436		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3437			      "0x%x\n", signal_levels);
3438 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003439 }
3440}
3441
Keith Packard1a2eb462011-11-16 16:26:07 -08003442/* Gen7's DP voltage swing and pre-emphasis control */
3443static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003444gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003445{
3446 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3447 DP_TRAIN_PRE_EMPHASIS_MASK);
3448 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303449 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003450 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003452 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003454 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3455
Sonika Jindalbd600182014-08-08 16:23:41 +05303456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003457 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003459 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3460
Sonika Jindalbd600182014-08-08 16:23:41 +05303461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003462 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003464 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3465
3466 default:
3467		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3468			      "0x%x\n", signal_levels);
3469 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3470 }
3471}
3472
Paulo Zanonif0a34242012-12-06 16:51:50 -02003473/* Properly updates "DP" with the correct signal levels. */
3474static void
3475intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3476{
3477 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003478 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003479 struct drm_device *dev = intel_dig_port->base.base.dev;
David Weinehallf8896f52015-06-25 11:11:03 +03003480 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003481 uint8_t train_set = intel_dp->train_set[0];
3482
David Weinehallf8896f52015-06-25 11:11:03 +03003483 if (HAS_DDI(dev)) {
3484 signal_levels = ddi_signal_levels(intel_dp);
3485
3486 if (IS_BROXTON(dev))
3487 signal_levels = 0;
3488 else
3489 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003490 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003491 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003492 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003493 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003494 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003495 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003496 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003497 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003498 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003499 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3500 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003501 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003502 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3503 }
3504
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303505 if (mask)
3506 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3507
3508 DRM_DEBUG_KMS("Using vswing level %d\n",
3509 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3510 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3511 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3512 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003513
3514 *DP = (*DP & ~mask) | signal_levels;
3515}
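/*
 * Worked example for the final statement above (illustrative): with
 * mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK and signal_levels =
 * DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5, only the old swing/pre-emphasis
 * bits of *DP are cleared and replaced; every other bit of the port
 * register value is left untouched.
 */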
3516
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003517static bool
Chris Wilsonea5b2132010-08-04 13:50:23 +01003518intel_dp_set_link_train(struct intel_dp *intel_dp,
Jani Nikula70aff662013-09-27 15:10:44 +03003519 uint32_t *DP,
Chris Wilson58e10eb2010-10-03 10:56:11 +01003520 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003521{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003522 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3523 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003524 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003525 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3526 int ret, len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003527
Ville Syrjälä7b13b582014-08-18 22:16:08 +03003528 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003529
Jani Nikula70aff662013-09-27 15:10:44 +03003530 I915_WRITE(intel_dp->output_reg, *DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003531 POSTING_READ(intel_dp->output_reg);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003532
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003533 buf[0] = dp_train_pat;
3534 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003535 DP_TRAINING_PATTERN_DISABLE) {
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003536 /* don't write DP_TRAINING_LANEx_SET on disable */
3537 len = 1;
3538 } else {
3539 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3540 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3541 len = intel_dp->lane_count + 1;
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003542 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003543
Jani Nikula9d1a1032014-03-14 16:51:15 +02003544 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3545 buf, len);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003546
3547 return ret == len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003548}
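/*
 * Layout of the single DPCD write above: buf[0] is the training pattern
 * and buf[1..lane_count] are the per-lane drive settings, relying on
 * DP_TRAINING_LANEx_SET immediately following DP_TRAINING_PATTERN_SET in
 * the DPCD. For a 4-lane link this is one 5-byte AUX write, so pattern
 * and lane levels are updated in a single transaction.
 */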
3549
Jani Nikula70aff662013-09-27 15:10:44 +03003550static bool
3551intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3552 uint8_t dp_train_pat)
3553{
Mika Kahola4e96c972015-04-29 09:17:39 +03003554 if (!intel_dp->train_set_valid)
3555 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
Jani Nikula70aff662013-09-27 15:10:44 +03003556 intel_dp_set_signal_levels(intel_dp, DP);
3557 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3558}
3559
3560static bool
3561intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
Jani Nikula0301b3a2013-10-15 09:36:08 +03003562 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Jani Nikula70aff662013-09-27 15:10:44 +03003563{
3564 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3565 struct drm_device *dev = intel_dig_port->base.base.dev;
3566 struct drm_i915_private *dev_priv = dev->dev_private;
3567 int ret;
3568
3569 intel_get_adjust_train(intel_dp, link_status);
3570 intel_dp_set_signal_levels(intel_dp, DP);
3571
3572 I915_WRITE(intel_dp->output_reg, *DP);
3573 POSTING_READ(intel_dp->output_reg);
3574
Jani Nikula9d1a1032014-03-14 16:51:15 +02003575 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3576 intel_dp->train_set, intel_dp->lane_count);
Jani Nikula70aff662013-09-27 15:10:44 +03003577
3578 return ret == intel_dp->lane_count;
3579}
3580
Imre Deak3ab9c632013-05-03 12:57:41 +03003581static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3582{
3583 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3584 struct drm_device *dev = intel_dig_port->base.base.dev;
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3586 enum port port = intel_dig_port->port;
3587 uint32_t val;
3588
3589 if (!HAS_DDI(dev))
3590 return;
3591
3592 val = I915_READ(DP_TP_CTL(port));
3593 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3594 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3595 I915_WRITE(DP_TP_CTL(port), val);
3596
3597 /*
3598 * On PORT_A we can have only eDP in SST mode. There the only reason
3599 * we need to set idle transmission mode is to work around a HW issue
3600 * where we enable the pipe while not in idle link-training mode.
3601	 * In this case there is a requirement to wait for a minimum number of
3602 * idle patterns to be sent.
3603 */
3604 if (port == PORT_A)
3605 return;
3606
3607 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3608 1))
3609 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3610}
3611
Jesse Barnes33a34e42010-09-08 12:42:02 -07003612/* Enable corresponding port and start training pattern 1 */
Paulo Zanonic19b0662012-10-15 15:51:41 -03003613void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003614intel_dp_start_link_train(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003615{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003616 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
Paulo Zanonic19b0662012-10-15 15:51:41 -03003617 struct drm_device *dev = encoder->dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003618 int i;
3619 uint8_t voltage;
Keith Packardcdb0e952011-11-01 20:00:06 -07003620 int voltage_tries, loop_tries;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003621 uint32_t DP = intel_dp->DP;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003622 uint8_t link_config[2];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003623
Paulo Zanoniaffa9352012-11-23 15:30:39 -02003624 if (HAS_DDI(dev))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003625 intel_ddi_prepare_link_retrain(encoder);
3626
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003627 /* Write the link configuration data */
Jani Nikula6aba5b62013-10-04 15:08:10 +03003628 link_config[0] = intel_dp->link_bw;
3629 link_config[1] = intel_dp->lane_count;
3630 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3631 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003632 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003633 if (intel_dp->num_sink_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05303634 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3635 &intel_dp->rate_select, 1);
Jani Nikula6aba5b62013-10-04 15:08:10 +03003636
3637 link_config[0] = 0;
3638 link_config[1] = DP_SET_ANSI_8B10B;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003639 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003640
3641 DP |= DP_PORT_EN;
Keith Packard1a2eb462011-11-16 16:26:07 -08003642
Jani Nikula70aff662013-09-27 15:10:44 +03003643 /* clock recovery */
3644 if (!intel_dp_reset_link_train(intel_dp, &DP,
3645 DP_TRAINING_PATTERN_1 |
3646 DP_LINK_SCRAMBLING_DISABLE)) {
3647 DRM_ERROR("failed to enable link training\n");
3648 return;
3649 }
3650
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003651 voltage = 0xff;
Keith Packardcdb0e952011-11-01 20:00:06 -07003652 voltage_tries = 0;
3653 loop_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003654 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003655 uint8_t link_status[DP_LINK_STATUS_SIZE];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003656
Daniel Vettera7c96552012-10-18 10:15:30 +02003657 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
Keith Packard93f62da2011-11-01 19:45:03 -07003658 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3659 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003660 break;
Keith Packard93f62da2011-11-01 19:45:03 -07003661 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003662
Daniel Vetter01916272012-10-18 10:15:25 +02003663 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Keith Packard93f62da2011-11-01 19:45:03 -07003664 DRM_DEBUG_KMS("clock recovery OK\n");
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003665 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003666 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003667
Mika Kahola4e96c972015-04-29 09:17:39 +03003668 /*
3669 * if we used previously trained voltage and pre-emphasis values
3670 * and we don't get clock recovery, reset link training values
3671 */
3672 if (intel_dp->train_set_valid) {
3673			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3674 /* clear the flag as we are not reusing train set */
3675 intel_dp->train_set_valid = false;
3676 if (!intel_dp_reset_link_train(intel_dp, &DP,
3677 DP_TRAINING_PATTERN_1 |
3678 DP_LINK_SCRAMBLING_DISABLE)) {
3679 DRM_ERROR("failed to enable link training\n");
3680 return;
3681 }
3682 continue;
3683 }
3684
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003685 /* Check to see if we've tried the max voltage */
3686 for (i = 0; i < intel_dp->lane_count; i++)
3687 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3688 break;
Takashi Iwai3b4f8192013-03-11 18:40:16 +01003689 if (i == intel_dp->lane_count) {
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003690 ++loop_tries;
3691 if (loop_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003692 DRM_ERROR("too many full retries, give up\n");
Keith Packardcdb0e952011-11-01 20:00:06 -07003693 break;
3694 }
Jani Nikula70aff662013-09-27 15:10:44 +03003695 intel_dp_reset_link_train(intel_dp, &DP,
3696 DP_TRAINING_PATTERN_1 |
3697 DP_LINK_SCRAMBLING_DISABLE);
Keith Packardcdb0e952011-11-01 20:00:06 -07003698 voltage_tries = 0;
3699 continue;
3700 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003701
3702 /* Check to see if we've tried the same voltage 5 times */
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003703 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
Chris Wilson24773672012-09-26 16:48:30 +01003704 ++voltage_tries;
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003705 if (voltage_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003706 DRM_ERROR("too many voltage retries, give up\n");
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003707 break;
3708 }
3709 } else
3710 voltage_tries = 0;
3711 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003712
Jani Nikula70aff662013-09-27 15:10:44 +03003713 /* Update training set as requested by target */
3714 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3715 DRM_ERROR("failed to update link training\n");
3716 break;
3717 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003718 }
3719
Jesse Barnes33a34e42010-09-08 12:42:02 -07003720 intel_dp->DP = DP;
3721}
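/*
 * Clock recovery loop summary (restating the logic above): the loop ends
 * when the sink reports clock recovery OK, when a status read fails, after
 * 5 full resets with every lane at maximum voltage swing (loop_tries), or
 * after trying the same voltage swing 5 times (voltage_tries). Otherwise
 * the drive settings requested by the sink are applied and it retries.
 */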
3722
Paulo Zanonic19b0662012-10-15 15:51:41 -03003723void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003724intel_dp_complete_link_train(struct intel_dp *intel_dp)
3725{
Jesse Barnes33a34e42010-09-08 12:42:02 -07003726 bool channel_eq = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003727 int tries, cr_tries;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003728 uint32_t DP = intel_dp->DP;
Todd Previte06ea66b2014-01-20 10:19:39 -07003729 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3730
3731	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3732 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3733 training_pattern = DP_TRAINING_PATTERN_3;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003734
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003735 /* channel equalization */
Jani Nikula70aff662013-09-27 15:10:44 +03003736 if (!intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003737 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003738 DP_LINK_SCRAMBLING_DISABLE)) {
3739 DRM_ERROR("failed to start channel equalization\n");
3740 return;
3741 }
3742
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003743 tries = 0;
Jesse Barnes37f80972011-01-05 14:45:24 -08003744 cr_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003745 channel_eq = false;
3746 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003747 uint8_t link_status[DP_LINK_STATUS_SIZE];
Zhenyu Wange3421a12010-04-08 09:43:27 +08003748
Jesse Barnes37f80972011-01-05 14:45:24 -08003749 if (cr_tries > 5) {
3750 DRM_ERROR("failed to train DP, aborting\n");
Jesse Barnes37f80972011-01-05 14:45:24 -08003751 break;
3752 }
3753
Daniel Vettera7c96552012-10-18 10:15:30 +02003754 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
Jani Nikula70aff662013-09-27 15:10:44 +03003755 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3756 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003757 break;
Jani Nikula70aff662013-09-27 15:10:44 +03003758 }
Jesse Barnes869184a2010-10-07 16:01:22 -07003759
Jesse Barnes37f80972011-01-05 14:45:24 -08003760 /* Make sure clock is still ok */
Daniel Vetter01916272012-10-18 10:15:25 +02003761 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003762 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003763 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003764 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003765 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003766 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003767 cr_tries++;
3768 continue;
3769 }
3770
Daniel Vetter1ffdff12012-10-18 10:15:24 +02003771 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003772 channel_eq = true;
3773 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003774 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003775
Jesse Barnes37f80972011-01-05 14:45:24 -08003776 /* Try 5 times, then try clock recovery if that fails */
3777 if (tries > 5) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003778 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003779 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003780 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003781 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003782 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003783 tries = 0;
3784 cr_tries++;
3785 continue;
3786 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003787
Jani Nikula70aff662013-09-27 15:10:44 +03003788 /* Update training set as requested by target */
3789 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3790 DRM_ERROR("failed to update link training\n");
3791 break;
3792 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003793 ++tries;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003794 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003795
Imre Deak3ab9c632013-05-03 12:57:41 +03003796 intel_dp_set_idle_link_train(intel_dp);
3797
3798 intel_dp->DP = DP;
3799
Mika Kahola4e96c972015-04-29 09:17:39 +03003800 if (channel_eq) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03003801 intel_dp->train_set_valid = true;
Masanari Iida07f42252013-03-20 11:00:34 +09003802 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
Mika Kahola4e96c972015-04-29 09:17:39 +03003803 }
Imre Deak3ab9c632013-05-03 12:57:41 +03003804}
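/*
 * Channel equalization loop summary (restating the logic above): training
 * is aborted once more than 5 clock-recovery restarts were needed
 * (cr_tries > 5); if clock recovery is lost, or equalization still fails
 * after more than 5 tries, link training restarts from clock recovery.
 * On success train_set_valid is set so a later retrain can reuse the
 * current drive settings.
 */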
3805
3806void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3807{
Jani Nikula70aff662013-09-27 15:10:44 +03003808 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
Imre Deak3ab9c632013-05-03 12:57:41 +03003809 DP_TRAINING_PATTERN_DISABLE);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003810}
3811
3812static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003813intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003814{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003815 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003816 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003817 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003818 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003819 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003820 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003821
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003822 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003823 return;
3824
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003825 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003826 return;
3827
Zhao Yakui28c97732009-10-09 11:39:41 +08003828 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003829
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003830 if ((IS_GEN7(dev) && port == PORT_A) ||
3831 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003832 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003833 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003834 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003835 if (IS_CHERRYVIEW(dev))
3836 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3837 else
3838 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003839 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003840 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003841 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003842 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003843
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003844 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3845 I915_WRITE(intel_dp->output_reg, DP);
3846 POSTING_READ(intel_dp->output_reg);
3847
3848 /*
3849 * HW workaround for IBX, we need to move the port
3850 * to transcoder A after disabling it to allow the
3851 * matching HDMI port to be enabled on transcoder A.
3852 */
3853 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3854 /* always enable with pattern 1 (as per spec) */
3855 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3856 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3857 I915_WRITE(intel_dp->output_reg, DP);
3858 POSTING_READ(intel_dp->output_reg);
3859
3860 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003861 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003862 POSTING_READ(intel_dp->output_reg);
Eric Anholt5bddd172010-11-18 09:32:59 +08003863 }
3864
Keith Packardf01eca22011-09-28 16:48:10 -07003865 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003866}
3867
Keith Packard26d61aa2011-07-25 20:01:09 -07003868static bool
3869intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003870{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003871 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3872 struct drm_device *dev = dig_port->base.base.dev;
3873 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303874 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003875
Jani Nikula9d1a1032014-03-14 16:51:15 +02003876 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3877 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003878 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003879
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003880 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003881
Adam Jacksonedb39242012-09-18 10:58:49 -04003882 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3883 return false; /* DPCD not present */
3884
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003885 /* Check if the panel supports PSR */
3886 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003887 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003888 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3889 intel_dp->psr_dpcd,
3890 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003891 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3892 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003893 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003894 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303895
3896 if (INTEL_INFO(dev)->gen >= 9 &&
3897 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3898 uint8_t frame_sync_cap;
3899
3900 dev_priv->psr.sink_support = true;
3901 intel_dp_dpcd_read_wake(&intel_dp->aux,
3902 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3903 &frame_sync_cap, 1);
3904 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3905 /* PSR2 needs frame sync as well */
3906 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3907 DRM_DEBUG_KMS("PSR2 %s on sink",
3908 dev_priv->psr.psr2_support ? "supported" : "not supported");
3909 }
Jani Nikula50003932013-09-20 16:42:17 +03003910 }
3911
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05303912	/* Training Pattern 3 support. Only Intel platforms that support HBR2
3913	 * also support TP3, hence that source check is used along with the
3914	 * DPCD check to ensure TP3 can be enabled.
3915	 * SKL < B0 is the only exception: due to WaDisableHBR2, TP3 is
3916	 * supported there but still not enabled.
3917 */
Todd Previte06ea66b2014-01-20 10:19:39 -07003918 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
Jani Nikula7809a612014-10-29 11:03:26 +02003919 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05303920 intel_dp_source_supports_hbr2(dev)) {
Todd Previte06ea66b2014-01-20 10:19:39 -07003921 intel_dp->use_tps3 = true;
Jani Nikulaf8d8a672014-09-05 16:19:18 +03003922 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
Todd Previte06ea66b2014-01-20 10:19:39 -07003923 } else
3924 intel_dp->use_tps3 = false;
3925
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303926 /* Intermediate frequency support */
3927 if (is_edp(intel_dp) &&
3928 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3929 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3930 (rev >= 0x03)) { /* eDp v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003931 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003932 int i;
3933
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303934 intel_dp_dpcd_read_wake(&intel_dp->aux,
3935 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003936 sink_rates,
3937 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003938
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003939 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3940 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003941
3942 if (val == 0)
3943 break;
3944
Sonika Jindalaf77b972015-05-07 13:59:28 +05303945			/* DPCD value is in 200 kHz units; drm clock is saved in deca-kHz */
3946 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003947 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003948 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303949 }
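/*
 * Worked example for the conversion above (illustrative): a DPCD entry of
 * 0x1FA4 (8100) is 8100 * 200 kHz = 1.62 GHz, stored here as
 * 8100 * 200 / 10 = 162000 in the driver's 10 kHz clock units; likewise
 * an entry of 13500 (2.7 GHz) is stored as 270000.
 */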
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003950
3951 intel_dp_print_rates(intel_dp);
3952
Adam Jacksonedb39242012-09-18 10:58:49 -04003953 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3954 DP_DWN_STRM_PORT_PRESENT))
3955 return true; /* native DP sink */
3956
3957 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3958 return true; /* no per-port downstream info */
3959
Jani Nikula9d1a1032014-03-14 16:51:15 +02003960 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3961 intel_dp->downstream_ports,
3962 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003963 return false; /* downstream port status fetch failed */
3964
3965 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003966}
3967
Adam Jackson0d198322012-05-14 16:05:47 -04003968static void
3969intel_dp_probe_oui(struct intel_dp *intel_dp)
3970{
3971 u8 buf[3];
3972
3973 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3974 return;
3975
Jani Nikula9d1a1032014-03-14 16:51:15 +02003976 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003977 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3978 buf[0], buf[1], buf[2]);
3979
Jani Nikula9d1a1032014-03-14 16:51:15 +02003980 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003981 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3982 buf[0], buf[1], buf[2]);
3983}
3984
Dave Airlie0e32b392014-05-02 14:02:48 +10003985static bool
3986intel_dp_probe_mst(struct intel_dp *intel_dp)
3987{
3988 u8 buf[1];
3989
3990 if (!intel_dp->can_mst)
3991 return false;
3992
3993 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3994 return false;
3995
Dave Airlie0e32b392014-05-02 14:02:48 +10003996 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3997 if (buf[0] & DP_MST_CAP) {
3998 DRM_DEBUG_KMS("Sink is MST capable\n");
3999 intel_dp->is_mst = true;
4000 } else {
4001 DRM_DEBUG_KMS("Sink is not MST capable\n");
4002 intel_dp->is_mst = false;
4003 }
4004 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004005
4006 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4007 return intel_dp->is_mst;
4008}
4009
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004010static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004011{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004012 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4013 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004014 u8 buf;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004015
4016 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004017 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4018 return;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004019 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004020
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004021 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004022 buf & ~DP_TEST_SINK_START) < 0)
4023 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4024
4025 hsw_enable_ips(intel_crtc);
4026}
4027
4028static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4029{
4030 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4031 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4032 u8 buf;
4033
4034 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4035 return -EIO;
4036
4037 if (!(buf & DP_TEST_CRC_SUPPORTED))
4038 return -ENOTTY;
4039
4040 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4041 return -EIO;
4042
4043 hsw_disable_ips(intel_crtc);
4044
4045 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4046 buf | DP_TEST_SINK_START) < 0) {
4047 hsw_enable_ips(intel_crtc);
4048 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004049 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004050
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004051 return 0;
4052}
4053
4054int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4055{
4056 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4057 struct drm_device *dev = dig_port->base.base.dev;
4058 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4059 u8 buf;
4060 int test_crc_count;
4061 int attempts = 6;
4062 int ret;
4063
4064 ret = intel_dp_sink_crc_start(intel_dp);
4065 if (ret)
4066 return ret;
4067
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004068 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4069 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004070 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004071 }
4072
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004073 test_crc_count = buf & DP_TEST_COUNT_MASK;
4074
4075 do {
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004076 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004077 DP_TEST_SINK_MISC, &buf) < 0) {
4078 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004079 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004080 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004081 intel_wait_for_vblank(dev, intel_crtc->pipe);
4082 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4083
4084 if (attempts == 0) {
Daniel Vetter90bd1f42014-11-19 11:18:47 +01004085 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004086 ret = -ETIMEDOUT;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004087 goto stop;
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004088 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004089
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004090 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004091 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004092stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004093 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004094 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004095}
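/*
 * Note on the readback above: DP_TEST_CRC_R_CR is the first of six CRC
 * bytes in the sink's DPCD, which the DP spec lays out as three 16-bit
 * CRCs (R/Cr, G/Y, B/Cb), one per colour component of the captured frame.
 */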
4096
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004097static bool
4098intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4099{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004100 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4101 DP_DEVICE_SERVICE_IRQ_VECTOR,
4102 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004103}
4104
Dave Airlie0e32b392014-05-02 14:02:48 +10004105static bool
4106intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4107{
4108 int ret;
4109
4110 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_SINK_COUNT_ESI,
4112 sink_irq_vector, 14);
4113 if (ret != 14)
4114 return false;
4115
4116 return true;
4117}
4118
Todd Previtec5d5ab72015-04-15 08:38:38 -07004119static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004120{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004121 uint8_t test_result = DP_TEST_ACK;
4122 return test_result;
4123}
4124
4125static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4126{
4127 uint8_t test_result = DP_TEST_NAK;
4128 return test_result;
4129}
4130
4131static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4132{
4133 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004134 struct intel_connector *intel_connector = intel_dp->attached_connector;
4135 struct drm_connector *connector = &intel_connector->base;
4136
4137 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004138 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004139 intel_dp->aux.i2c_defer_count > 6) {
4140 /* Check EDID read for NACKs, DEFERs and corruption
4141 * (DP CTS 1.2 Core r1.1)
4142 * 4.2.2.4 : Failed EDID read, I2C_NAK
4143 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4144 * 4.2.2.6 : EDID corruption detected
4145 * Use failsafe mode for all cases
4146 */
4147 if (intel_dp->aux.i2c_nack_count > 0 ||
4148 intel_dp->aux.i2c_defer_count > 0)
4149 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4150 intel_dp->aux.i2c_nack_count,
4151 intel_dp->aux.i2c_defer_count);
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4153 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304154 struct edid *block = intel_connector->detect_edid;
4155
4156		/* We have to write the checksum of the last block read */
4159 block += intel_connector->detect_edid->extensions;
4160
Todd Previte559be302015-05-04 07:48:20 -07004161 if (!drm_dp_dpcd_write(&intel_dp->aux,
4162 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304163 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004164 1))
Todd Previte559be302015-05-04 07:48:20 -07004165 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4166
4167 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4168 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4169 }
4170
4171 /* Set test active flag here so userspace doesn't interrupt things */
4172 intel_dp->compliance_test_active = 1;
4173
Todd Previtec5d5ab72015-04-15 08:38:38 -07004174 return test_result;
4175}
4176
4177static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4178{
4179 uint8_t test_result = DP_TEST_NAK;
4180 return test_result;
4181}
4182
4183static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4184{
4185 uint8_t response = DP_TEST_NAK;
4186 uint8_t rxdata = 0;
4187 int status = 0;
4188
Todd Previte559be302015-05-04 07:48:20 -07004189 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004190 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004191 intel_dp->compliance_test_data = 0;
4192
Todd Previtec5d5ab72015-04-15 08:38:38 -07004193 intel_dp->aux.i2c_nack_count = 0;
4194 intel_dp->aux.i2c_defer_count = 0;
4195
4196 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4197 if (status <= 0) {
4198 DRM_DEBUG_KMS("Could not read test request from sink\n");
4199 goto update_status;
4200 }
4201
4202 switch (rxdata) {
4203 case DP_TEST_LINK_TRAINING:
4204 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4205 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4206 response = intel_dp_autotest_link_training(intel_dp);
4207 break;
4208 case DP_TEST_LINK_VIDEO_PATTERN:
4209 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4211 response = intel_dp_autotest_video_pattern(intel_dp);
4212 break;
4213 case DP_TEST_LINK_EDID_READ:
4214 DRM_DEBUG_KMS("EDID test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4216 response = intel_dp_autotest_edid(intel_dp);
4217 break;
4218 case DP_TEST_LINK_PHY_TEST_PATTERN:
4219 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4221 response = intel_dp_autotest_phy_pattern(intel_dp);
4222 break;
4223 default:
4224 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4225 break;
4226 }
4227
4228update_status:
4229 status = drm_dp_dpcd_write(&intel_dp->aux,
4230 DP_TEST_RESPONSE,
4231 &response, 1);
4232 if (status <= 0)
4233 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004234}
4235
Dave Airlie0e32b392014-05-02 14:02:48 +10004236static int
4237intel_dp_check_mst_status(struct intel_dp *intel_dp)
4238{
4239 bool bret;
4240
4241 if (intel_dp->is_mst) {
4242 u8 esi[16] = { 0 };
4243 int ret = 0;
4244 int retry;
4245 bool handled;
4246 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4247go_again:
4248 if (bret == true) {
4249
4250 /* check link status - esi[10] = 0x200c */
4251 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4252 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4253 intel_dp_start_link_train(intel_dp);
4254 intel_dp_complete_link_train(intel_dp);
4255 intel_dp_stop_link_train(intel_dp);
4256 }
4257
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004258 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004259 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4260
4261 if (handled) {
4262 for (retry = 0; retry < 3; retry++) {
4263 int wret;
4264 wret = drm_dp_dpcd_write(&intel_dp->aux,
4265 DP_SINK_COUNT_ESI+1,
4266 &esi[1], 3);
4267 if (wret == 3) {
4268 break;
4269 }
4270 }
4271
4272 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4273 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004274 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004275 goto go_again;
4276 }
4277 } else
4278 ret = 0;
4279
4280 return ret;
4281 } else {
4282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4283 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4284 intel_dp->is_mst = false;
4285 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4286 /* send a hotplug event */
4287 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4288 }
4289 }
4290 return -EINVAL;
4291}
4292
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004293/*
4294 * According to DP spec
4295 * 5.1.2:
4296 * 1. Read DPCD
4297 * 2. Configure link according to Receiver Capabilities
4298 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4299 * 4. Check link status on receipt of hot-plug interrupt
4300 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004301static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004302intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004303{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004304 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004305 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004306 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004307 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004308
Dave Airlie5b215bc2014-08-05 10:40:20 +10004309 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4310
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004311 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004312 return;
4313
Imre Deak1a125d82014-08-18 14:42:46 +03004314 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4315 return;
4316
Keith Packard92fd8fd2011-07-25 19:50:10 -07004317 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004318 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004319 return;
4320 }
4321
Keith Packard92fd8fd2011-07-25 19:50:10 -07004322 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004323 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004324 return;
4325 }
4326
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004327 /* Try to read the source of the interrupt */
4328 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4329 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4330 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004331 drm_dp_dpcd_writeb(&intel_dp->aux,
4332 DP_DEVICE_SERVICE_IRQ_VECTOR,
4333 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004334
4335 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004336 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004337 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4338 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4339 }
4340
Daniel Vetter1ffdff12012-10-18 10:15:24 +02004341 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004342 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004343 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004344 intel_dp_start_link_train(intel_dp);
4345 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004346 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004347 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004348}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004349
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004350/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004351static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004352intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004353{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004354 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004355 uint8_t type;
4356
4357 if (!intel_dp_get_dpcd(intel_dp))
4358 return connector_status_disconnected;
4359
4360 /* if there's no downstream port, we're done */
4361 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004362 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004363
4364 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004365 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4366 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004367 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004368
4369 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4370 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004371 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004372
Adam Jackson23235172012-09-20 16:42:45 -04004373 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4374 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004375 }
4376
4377 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004378 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004379 return connector_status_connected;
4380
4381 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004382 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4383 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4384 if (type == DP_DS_PORT_TYPE_VGA ||
4385 type == DP_DS_PORT_TYPE_NON_EDID)
4386 return connector_status_unknown;
4387 } else {
4388 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4389 DP_DWN_STRM_PORT_TYPE_MASK;
4390 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4391 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4392 return connector_status_unknown;
4393 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004394
4395 /* Anything else is out of spec, warn and ignore */
4396 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004397 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004398}
4399
4400static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004401edp_detect(struct intel_dp *intel_dp)
4402{
4403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4404 enum drm_connector_status status;
4405
4406 status = intel_panel_detect(dev);
4407 if (status == connector_status_unknown)
4408 status = connector_status_connected;
4409
4410 return status;
4411}
4412
4413static enum drm_connector_status
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004414ironlake_dp_detect(struct intel_dp *intel_dp)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004415{
Paulo Zanoni30add222012-10-26 19:05:45 -02004416 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Damien Lespiau1b469632012-12-13 16:09:01 +00004417 struct drm_i915_private *dev_priv = dev->dev_private;
4418 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004419
Damien Lespiau1b469632012-12-13 16:09:01 +00004420 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4421 return connector_status_disconnected;
4422
Keith Packard26d61aa2011-07-25 20:01:09 -07004423 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004424}
4425
Dave Airlie2a592be2014-09-01 16:58:12 +10004426static int g4x_digital_port_connected(struct drm_device *dev,
4427 struct intel_digital_port *intel_dig_port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004428{
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004429 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson10f76a32012-05-11 18:01:32 +01004430 uint32_t bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004431
Todd Previte232a6ee2014-01-23 00:13:41 -07004432 if (IS_VALLEYVIEW(dev)) {
4433 switch (intel_dig_port->port) {
4434 case PORT_B:
4435 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4436 break;
4437 case PORT_C:
4438 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4439 break;
4440 case PORT_D:
4441 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4442 break;
4443 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004444 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004445 }
4446 } else {
4447 switch (intel_dig_port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004458 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004459 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004460 }
4461
Chris Wilson10f76a32012-05-11 18:01:32 +01004462 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
Dave Airlie2a592be2014-09-01 16:58:12 +10004463 return 0;
4464 return 1;
4465}
4466
4467static enum drm_connector_status
4468g4x_dp_detect(struct intel_dp *intel_dp)
4469{
4470 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4471 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4472 int ret;
4473
4474 /* Can't disconnect eDP, but you can close the lid... */
4475 if (is_edp(intel_dp)) {
4476 enum drm_connector_status status;
4477
4478 status = intel_panel_detect(dev);
4479 if (status == connector_status_unknown)
4480 status = connector_status_connected;
4481 return status;
4482 }
4483
4484 ret = g4x_digital_port_connected(dev, intel_dig_port);
4485 if (ret == -EINVAL)
4486 return connector_status_unknown;
4487 else if (ret == 0)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004488 return connector_status_disconnected;
4489
Keith Packard26d61aa2011-07-25 20:01:09 -07004490 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004491}
4492
Keith Packard8c241fe2011-09-28 16:38:44 -07004493static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004494intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004495{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004496 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004497
Jani Nikula9cd300e2012-10-19 14:51:52 +03004498 /* use cached edid if we have one */
4499 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004500 /* invalid edid */
4501 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004502 return NULL;
4503
Jani Nikula55e9ede2013-10-01 10:38:54 +03004504 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004505 } else
4506 return drm_get_edid(&intel_connector->base,
4507 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004508}
4509
Chris Wilsonbeb60602014-09-02 20:04:00 +01004510static void
4511intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004512{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004513 struct intel_connector *intel_connector = intel_dp->attached_connector;
4514 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004515
Chris Wilsonbeb60602014-09-02 20:04:00 +01004516 edid = intel_dp_get_edid(intel_dp);
4517 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004518
Chris Wilsonbeb60602014-09-02 20:04:00 +01004519 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4520 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4521 else
4522 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4523}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004524
Chris Wilsonbeb60602014-09-02 20:04:00 +01004525static void
4526intel_dp_unset_edid(struct intel_dp *intel_dp)
4527{
4528 struct intel_connector *intel_connector = intel_dp->attached_connector;
4529
4530 kfree(intel_connector->detect_edid);
4531 intel_connector->detect_edid = NULL;
4532
4533 intel_dp->has_audio = false;
4534}
4535
4536static enum intel_display_power_domain
4537intel_dp_power_get(struct intel_dp *dp)
4538{
4539 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4540 enum intel_display_power_domain power_domain;
4541
4542 power_domain = intel_display_port_power_domain(encoder);
4543 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4544
4545 return power_domain;
4546}
4547
4548static void
4549intel_dp_power_put(struct intel_dp *dp,
4550 enum intel_display_power_domain power_domain)
4551{
4552 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4553 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004554}
4555
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004556static enum drm_connector_status
4557intel_dp_detect(struct drm_connector *connector, bool force)
4558{
4559 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4561 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004562 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004563 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004564 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004565 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004566 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004567
Chris Wilson164c8592013-07-20 20:27:08 +01004568 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004569 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004570 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004571
Dave Airlie0e32b392014-05-02 14:02:48 +10004572 if (intel_dp->is_mst) {
4573 /* MST devices are disconnected from a monitor POV */
4574 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4575 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004576 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004577 }
4578
Chris Wilsonbeb60602014-09-02 20:04:00 +01004579 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004580
Chris Wilsond410b562014-09-02 20:03:59 +01004581 /* Can't disconnect eDP, but you can close the lid... */
4582 if (is_edp(intel_dp))
4583 status = edp_detect(intel_dp);
4584 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004585 status = ironlake_dp_detect(intel_dp);
4586 else
4587 status = g4x_dp_detect(intel_dp);
4588 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004589 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004590
Adam Jackson0d198322012-05-14 16:05:47 -04004591 intel_dp_probe_oui(intel_dp);
4592
Dave Airlie0e32b392014-05-02 14:02:48 +10004593 ret = intel_dp_probe_mst(intel_dp);
4594 if (ret) {
4595 /* if we are in MST mode then this connector
4596 won't appear connected or have anything with EDID on it */
4597 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4598 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4599 status = connector_status_disconnected;
4600 goto out;
4601 }
4602
Chris Wilsonbeb60602014-09-02 20:04:00 +01004603 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004604
Paulo Zanonid63885d2012-10-26 19:05:49 -02004605 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4606 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004607 status = connector_status_connected;
4608
Todd Previte09b1eb12015-04-20 15:27:34 -07004609 /* Try to read the source of the interrupt */
4610 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4611 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4612 /* Clear interrupt source */
4613 drm_dp_dpcd_writeb(&intel_dp->aux,
4614 DP_DEVICE_SERVICE_IRQ_VECTOR,
4615 sink_irq_vector);
4616
4617 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4618 intel_dp_handle_test_request(intel_dp);
4619 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4620 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4621 }
4622
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004623out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004624 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004625 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004626}
4627
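/*
 * Connector ->force hook: drop the cached EDID and re-read it unconditionally,
 * bypassing the live-status checks done in intel_dp_detect(). The re-read is
 * only done if the connector was already reported as connected.
 */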
Chris Wilsonbeb60602014-09-02 20:04:00 +01004628static void
4629intel_dp_force(struct drm_connector *connector)
4630{
4631 struct intel_dp *intel_dp = intel_attached_dp(connector);
4632 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4633 enum intel_display_power_domain power_domain;
4634
4635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4636 connector->base.id, connector->name);
4637 intel_dp_unset_edid(intel_dp);
4638
4639 if (connector->status != connector_status_connected)
4640 return;
4641
4642 power_domain = intel_dp_power_get(intel_dp);
4643
4644 intel_dp_set_edid(intel_dp);
4645
4646 intel_dp_power_put(intel_dp, power_domain);
4647
4648 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4649 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4650}
4651
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004652static int intel_dp_get_modes(struct drm_connector *connector)
4653{
Jani Nikuladd06f902012-10-19 14:51:50 +03004654 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004655 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004656
Chris Wilsonbeb60602014-09-02 20:04:00 +01004657 edid = intel_connector->detect_edid;
4658 if (edid) {
4659 int ret = intel_connector_update_modes(connector, edid);
4660 if (ret)
4661 return ret;
4662 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004663
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004664 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004665 if (is_edp(intel_attached_dp(connector)) &&
4666 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004667 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004668
4669 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004670 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004671 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004672 drm_mode_probed_add(connector, mode);
4673 return 1;
4674 }
4675 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004676
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004677 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004678}
4679
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004680static bool
4681intel_dp_detect_audio(struct drm_connector *connector)
4682{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004683 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004684 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004685
Chris Wilsonbeb60602014-09-02 20:04:00 +01004686 edid = to_intel_connector(connector)->detect_edid;
4687 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004688 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004689
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004690 return has_audio;
4691}
4692
Chris Wilsonf6849602010-09-19 09:29:33 +01004693static int
4694intel_dp_set_property(struct drm_connector *connector,
4695 struct drm_property *property,
4696 uint64_t val)
4697{
Chris Wilsone953fd72011-02-21 22:23:52 +00004698 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004699 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004700 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4701 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004702 int ret;
4703
Rob Clark662595d2012-10-11 20:36:04 -05004704 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004705 if (ret)
4706 return ret;
4707
Chris Wilson3f43c482011-05-12 22:17:24 +01004708 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004709 int i = val;
4710 bool has_audio;
4711
4712 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004713 return 0;
4714
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004715 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004716
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004717 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004718 has_audio = intel_dp_detect_audio(connector);
4719 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004720 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004721
4722 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004723 return 0;
4724
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004725 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004726 goto done;
4727 }
4728
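	/*
	 * Broadcast RGB selects between automatic, full ([0..255]) and
	 * limited ([16..235]) RGB quantization range on the DP output.
	 */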
Chris Wilsone953fd72011-02-21 22:23:52 +00004729 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004730 bool old_auto = intel_dp->color_range_auto;
4731 uint32_t old_range = intel_dp->color_range;
4732
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004733 switch (val) {
4734 case INTEL_BROADCAST_RGB_AUTO:
4735 intel_dp->color_range_auto = true;
4736 break;
4737 case INTEL_BROADCAST_RGB_FULL:
4738 intel_dp->color_range_auto = false;
4739 intel_dp->color_range = 0;
4740 break;
4741 case INTEL_BROADCAST_RGB_LIMITED:
4742 intel_dp->color_range_auto = false;
4743 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4744 break;
4745 default:
4746 return -EINVAL;
4747 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004748
4749 if (old_auto == intel_dp->color_range_auto &&
4750 old_range == intel_dp->color_range)
4751 return 0;
4752
Chris Wilsone953fd72011-02-21 22:23:52 +00004753 goto done;
4754 }
4755
Yuly Novikov53b41832012-10-26 12:04:00 +03004756 if (is_edp(intel_dp) &&
4757 property == connector->dev->mode_config.scaling_mode_property) {
4758 if (val == DRM_MODE_SCALE_NONE) {
4759			DRM_DEBUG_KMS("scaling mode 'none' not supported\n");
4760 return -EINVAL;
4761 }
4762
4763 if (intel_connector->panel.fitting_mode == val) {
4764 /* the eDP scaling property is not changed */
4765 return 0;
4766 }
4767 intel_connector->panel.fitting_mode = val;
4768
4769 goto done;
4770 }
4771
Chris Wilsonf6849602010-09-19 09:29:33 +01004772 return -EINVAL;
4773
4774done:
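	/*
	 * Property changes only take effect on the next modeset, so restore
	 * the mode on the attached crtc (if any) to apply them immediately.
	 */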
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004775 if (intel_encoder->base.crtc)
4776 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004777
4778 return 0;
4779}
4780
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004781static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004782intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004783{
Jani Nikula1d508702012-10-19 14:51:49 +03004784 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004785
Chris Wilson10e972d2014-09-04 21:43:45 +01004786 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004787
Jani Nikula9cd300e2012-10-19 14:51:52 +03004788 if (!IS_ERR_OR_NULL(intel_connector->edid))
4789 kfree(intel_connector->edid);
4790
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004791 /* Can't call is_edp() since the encoder may have been destroyed
4792 * already. */
4793 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004794 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004795
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004796 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004797 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004798}
4799
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004800void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004801{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004802 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4803 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004804
Dave Airlie4f71d0c2014-06-04 16:02:28 +10004805 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10004806 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004807 if (is_edp(intel_dp)) {
4808 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004809 /*
4810	 * vdd might still be enabled due to the delayed vdd off.
4811 * Make sure vdd is actually turned off here.
4812 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004813 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004814 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004815 pps_unlock(intel_dp);
4816
Clint Taylor01527b32014-07-07 13:01:46 -07004817 if (intel_dp->edp_notifier.notifier_call) {
4818 unregister_reboot_notifier(&intel_dp->edp_notifier);
4819 intel_dp->edp_notifier.notifier_call = NULL;
4820 }
Keith Packardbd943152011-09-18 23:09:52 -07004821 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004822 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004823 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004824}
4825
Imre Deak07f9cd02014-08-18 14:42:45 +03004826static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4827{
4828 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4829
4830 if (!is_edp(intel_dp))
4831 return;
4832
Ville Syrjälä951468f2014-09-04 14:55:31 +03004833 /*
4834	 * vdd might still be enabled due to the delayed vdd off.
4835 * Make sure vdd is actually turned off here.
4836 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004837 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004838 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004839 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004840 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004841}
4842
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004843static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4844{
4845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4846 struct drm_device *dev = intel_dig_port->base.base.dev;
4847 struct drm_i915_private *dev_priv = dev->dev_private;
4848 enum intel_display_power_domain power_domain;
4849
4850 lockdep_assert_held(&dev_priv->pps_mutex);
4851
4852 if (!edp_have_panel_vdd(intel_dp))
4853 return;
4854
4855 /*
4856 * The VDD bit needs a power domain reference, so if the bit is
4857 * already enabled when we boot or resume, grab this reference and
4858 * schedule a vdd off, so we don't hold on to the reference
4859 * indefinitely.
4860 */
4861 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4862 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4863 intel_display_power_get(dev_priv, power_domain);
4864
4865 edp_panel_vdd_schedule_off(intel_dp);
4866}
4867
Imre Deak6d93c0c2014-07-31 14:03:36 +03004868static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4869{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004870 struct intel_dp *intel_dp;
4871
4872 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4873 return;
4874
4875 intel_dp = enc_to_intel_dp(encoder);
4876
4877 pps_lock(intel_dp);
4878
4879 /*
4880 * Read out the current power sequencer assignment,
4881 * in case the BIOS did something with it.
4882 */
4883 if (IS_VALLEYVIEW(encoder->dev))
4884 vlv_initial_power_sequencer_setup(intel_dp);
4885
4886 intel_edp_panel_vdd_sanitize(intel_dp);
4887
4888 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004889}
4890
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004891static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004892 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004893 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004894 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004895 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004896 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004897 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004898 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004899 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004900 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004901};
4902
4903static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4904 .get_modes = intel_dp_get_modes,
4905 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004906 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004907};
4908
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004909static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004910 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004911 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004912};
4913
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004914enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004915intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4916{
4917 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004918 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004919 struct drm_device *dev = intel_dig_port->base.base.dev;
4920 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004921 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004922 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004923
Dave Airlie0e32b392014-05-02 14:02:48 +10004924 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4925 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004926
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004927 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4928 /*
4929 * vdd off can generate a long pulse on eDP which
4930 * would require vdd on to handle it, and thus we
4931 * would end up in an endless cycle of
4932 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4933 */
4934 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4935 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02004936 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004937 }
4938
Ville Syrjälä26fbb772014-08-11 18:37:37 +03004939 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4940 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10004941 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10004942
Imre Deak1c767b32014-08-18 14:42:42 +03004943 power_domain = intel_display_port_power_domain(intel_encoder);
4944 intel_display_power_get(dev_priv, power_domain);
4945
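	/*
	 * A long pulse indicates the sink may have been (un)plugged, so the
	 * DPCD is re-read and MST re-probed; a short pulse is used by the
	 * sink to signal events such as link status changes or MST messages.
	 */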
Dave Airlie0e32b392014-05-02 14:02:48 +10004946 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03004947 /* indicate that we need to restart link training */
4948 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10004949
4950 if (HAS_PCH_SPLIT(dev)) {
4951 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4952 goto mst_fail;
4953 } else {
4954 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4955 goto mst_fail;
4956 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004957
4958 if (!intel_dp_get_dpcd(intel_dp)) {
4959 goto mst_fail;
4960 }
4961
4962 intel_dp_probe_oui(intel_dp);
4963
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004964 if (!intel_dp_probe_mst(intel_dp)) {
4965 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4966 intel_dp_check_link_status(intel_dp);
4967 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004968 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004969 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004970 } else {
4971 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03004972 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10004973 goto mst_fail;
4974 }
4975
4976 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10004977 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10004978 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10004979 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004980 }
4981 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004982
4983 ret = IRQ_HANDLED;
4984
Imre Deak1c767b32014-08-18 14:42:42 +03004985 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10004986mst_fail:
4987	/* if we were in MST mode and the device is not there, get out of MST mode */
4988 if (intel_dp->is_mst) {
4989 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4990 intel_dp->is_mst = false;
4991 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4992 }
Imre Deak1c767b32014-08-18 14:42:42 +03004993put_power:
4994 intel_display_power_put(dev_priv, power_domain);
4995
4996 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10004997}
4998
Zhenyu Wange3421a12010-04-08 09:43:27 +08004999/* Return which DP Port should be selected for Transcoder DP control */
5000int
Akshay Joshi0206e352011-08-16 15:34:10 -04005001intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08005002{
5003 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005004 struct intel_encoder *intel_encoder;
5005 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005006
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005007 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5008 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01005009
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005010 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5011 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01005012 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005013 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01005014
Zhenyu Wange3421a12010-04-08 09:43:27 +08005015 return -1;
5016}
5017
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005018/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005019bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005020{
5021 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005022 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005023 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005024 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005025 [PORT_B] = DVO_PORT_DPB,
5026 [PORT_C] = DVO_PORT_DPC,
5027 [PORT_D] = DVO_PORT_DPD,
5028 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005029 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005030
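	/* DP on port A can only ever be eDP, so skip the VBT lookup. */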
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005031 if (port == PORT_A)
5032 return true;
5033
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005034 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005035 return false;
5036
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005037 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5038 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005039
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005040 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005041 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5042 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005043 return true;
5044 }
5045 return false;
5046}
5047
Dave Airlie0e32b392014-05-02 14:02:48 +10005048void
Chris Wilsonf6849602010-09-19 09:29:33 +01005049intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5050{
Yuly Novikov53b41832012-10-26 12:04:00 +03005051 struct intel_connector *intel_connector = to_intel_connector(connector);
5052
Chris Wilson3f43c482011-05-12 22:17:24 +01005053 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005054 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005055 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005056
5057 if (is_edp(intel_dp)) {
5058 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005059 drm_object_attach_property(
5060 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005061 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005062 DRM_MODE_SCALE_ASPECT);
5063 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005064 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005065}
5066
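/*
 * Start the panel power timestamps at "now" so that the first panel power-on
 * and backlight transitions after driver load wait out the full programmed
 * delays.
 */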
Imre Deakdada1a92014-01-29 13:25:41 +02005067static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5068{
5069 intel_dp->last_power_cycle = jiffies;
5070 intel_dp->last_power_on = jiffies;
5071 intel_dp->last_backlight_off = jiffies;
5072}
5073
Daniel Vetter67a54562012-10-20 20:57:45 +02005074static void
5075intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005076 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005077{
5078 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005079 struct edp_power_seq cur, vbt, spec,
5080 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305081 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5082 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005083
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005084 lockdep_assert_held(&dev_priv->pps_mutex);
5085
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005086 /* already initialized? */
5087 if (final->t11_t12 != 0)
5088 return;
5089
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305090 if (IS_BROXTON(dev)) {
5091 /*
5092 * TODO: BXT has 2 sets of PPS registers.
5093	 * The correct register for Broxton needs to be identified
5094	 * using the VBT; hardcoding for now.
5095 */
5096 pp_ctrl_reg = BXT_PP_CONTROL(0);
5097 pp_on_reg = BXT_PP_ON_DELAYS(0);
5098 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5099 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005100 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005101 pp_on_reg = PCH_PP_ON_DELAYS;
5102 pp_off_reg = PCH_PP_OFF_DELAYS;
5103 pp_div_reg = PCH_PP_DIVISOR;
5104 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005105 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5106
5107 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5108 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5109 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5110 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005111 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005112
5113 /* Workaround: Need to write PP_CONTROL with the unlock key as
5114 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305115 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005116
Jesse Barnes453c5422013-03-28 09:55:41 -07005117 pp_on = I915_READ(pp_on_reg);
5118 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305119 if (!IS_BROXTON(dev)) {
5120 I915_WRITE(pp_ctrl_reg, pp_ctl);
5121 pp_div = I915_READ(pp_div_reg);
5122 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005123
5124 /* Pull timing values out of registers */
5125 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5126 PANEL_POWER_UP_DELAY_SHIFT;
5127
5128 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5129 PANEL_LIGHT_ON_DELAY_SHIFT;
5130
5131 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5132 PANEL_LIGHT_OFF_DELAY_SHIFT;
5133
5134 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5135 PANEL_POWER_DOWN_DELAY_SHIFT;
5136
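	/*
	 * The power cycle delay is read from PP_CONTROL on BXT and from
	 * PP_DIVISOR elsewhere; either way it is converted to the same
	 * 100 us units used for the other delays.
	 */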
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305137 if (IS_BROXTON(dev)) {
5138 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5139 BXT_POWER_CYCLE_DELAY_SHIFT;
5140 if (tmp > 0)
5141 cur.t11_t12 = (tmp - 1) * 1000;
5142 else
5143 cur.t11_t12 = 0;
5144 } else {
5145 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005146 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305147 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005148
5149 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5150 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5151
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005152 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005153
5154 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5155 * our hw here, which are all in 100usec. */
5156 spec.t1_t3 = 210 * 10;
5157 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5158 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5159 spec.t10 = 500 * 10;
5160 /* This one is special and actually in units of 100ms, but zero
5161 * based in the hw (so we need to add 100 ms). But the sw vbt
5162	 * table multiplies it by 1000 to make it in units of 100usec,
5163 * too. */
5164 spec.t11_t12 = (510 + 100) * 10;
5165
5166 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5167 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5168
5169 /* Use the max of the register settings and vbt. If both are
5170 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005171#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005172 spec.field : \
5173 max(cur.field, vbt.field))
5174 assign_final(t1_t3);
5175 assign_final(t8);
5176 assign_final(t9);
5177 assign_final(t10);
5178 assign_final(t11_t12);
5179#undef assign_final
5180
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005181#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005182 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5183 intel_dp->backlight_on_delay = get_delay(t8);
5184 intel_dp->backlight_off_delay = get_delay(t9);
5185 intel_dp->panel_power_down_delay = get_delay(t10);
5186 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5187#undef get_delay
5188
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005189 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5190 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5191 intel_dp->panel_power_cycle_delay);
5192
5193 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5194 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005195}
5196
5197static void
5198intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005199 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005200{
5201 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005202 u32 pp_on, pp_off, pp_div, port_sel = 0;
5203 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305204 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005205 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005206 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005207
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005208 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005209
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305210 if (IS_BROXTON(dev)) {
5211 /*
5212 * TODO: BXT has 2 sets of PPS registers.
5213	 * The correct register for Broxton needs to be identified
5214	 * using the VBT; hardcoding for now.
5215 */
5216 pp_ctrl_reg = BXT_PP_CONTROL(0);
5217 pp_on_reg = BXT_PP_ON_DELAYS(0);
5218 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5219
5220 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005221 pp_on_reg = PCH_PP_ON_DELAYS;
5222 pp_off_reg = PCH_PP_OFF_DELAYS;
5223 pp_div_reg = PCH_PP_DIVISOR;
5224 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005225 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5226
5227 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5228 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5229 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005230 }
5231
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005232 /*
5233 * And finally store the new values in the power sequencer. The
5234 * backlight delays are set to 1 because we do manual waits on them. For
5235 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5236 * we'll end up waiting for the backlight off delay twice: once when we
5237 * do the manual sleep, and once when we disable the panel and wait for
5238 * the PP_STATUS bit to become zero.
5239 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005240 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005241 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5242 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005243 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005244 /* Compute the divisor for the pp clock, simply match the Bspec
5245 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305246 if (IS_BROXTON(dev)) {
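		/*
		 * On BXT the power cycle delay field lives in PP_CONTROL, so
		 * read-modify-write that register instead of PP_DIVISOR.
		 */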
5247 pp_div = I915_READ(pp_ctrl_reg);
5248 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5249 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5250 << BXT_POWER_CYCLE_DELAY_SHIFT);
5251 } else {
5252 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5253 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5254 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5255 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005256
5257 /* Haswell doesn't have any port selection bits for the panel
5258 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005259 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005260 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005261 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005262 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005263 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005264 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005265 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005266 }
5267
Jesse Barnes453c5422013-03-28 09:55:41 -07005268 pp_on |= port_sel;
5269
5270 I915_WRITE(pp_on_reg, pp_on);
5271 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305272 if (IS_BROXTON(dev))
5273 I915_WRITE(pp_ctrl_reg, pp_div);
5274 else
5275 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005276
Daniel Vetter67a54562012-10-20 20:57:45 +02005277 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005278 I915_READ(pp_on_reg),
5279 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305280 IS_BROXTON(dev) ?
5281 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005282 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005283}
5284
Vandana Kannanb33a2812015-02-13 15:33:03 +05305285/**
5286 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5287 * @dev: DRM device
5288 * @refresh_rate: RR to be programmed
5289 *
5290 * This function gets called when refresh rate (RR) has to be changed from
5291 * one frequency to another. Switches can be between high and low RR
5292 * supported by the panel or to any other RR based on media playback (in
5293 * this case, RR value needs to be passed from user space).
5294 *
5295 * The caller of this function needs to take a lock on dev_priv->drrs.
5296 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305297static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305298{
5299 struct drm_i915_private *dev_priv = dev->dev_private;
5300 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305301 struct intel_digital_port *dig_port = NULL;
5302 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005303 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305304 struct intel_crtc *intel_crtc = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305305 u32 reg, val;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305306 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305307
5308 if (refresh_rate <= 0) {
5309 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5310 return;
5311 }
5312
Vandana Kannan96178ee2015-01-10 02:25:56 +05305313 if (intel_dp == NULL) {
5314 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305315 return;
5316 }
5317
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005318 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005319 * FIXME: This needs proper synchronization with psr state for some
5320 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005321 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305322
Vandana Kannan96178ee2015-01-10 02:25:56 +05305323 dig_port = dp_to_dig_port(intel_dp);
5324 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005325 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305326
5327 if (!intel_crtc) {
5328 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5329 return;
5330 }
5331
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005332 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305333
Vandana Kannan96178ee2015-01-10 02:25:56 +05305334 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305335 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5336 return;
5337 }
5338
Vandana Kannan96178ee2015-01-10 02:25:56 +05305339 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5340 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305341 index = DRRS_LOW_RR;
5342
Vandana Kannan96178ee2015-01-10 02:25:56 +05305343 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305344 DRM_DEBUG_KMS(
5345 "DRRS requested for previously set RR...ignoring\n");
5346 return;
5347 }
5348
5349 if (!intel_crtc->active) {
5350 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5351 return;
5352 }
5353
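	/*
	 * Gen8+ (except CHV) switch the refresh rate by selecting between the
	 * M1/N1 and M2/N2 link M/N values; older platforms and VLV/CHV toggle
	 * the eDP RR mode switch bit in PIPECONF instead.
	 */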
Durgadoss R44395bf2015-02-13 15:33:02 +05305354 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305355 switch (index) {
5356 case DRRS_HIGH_RR:
5357 intel_dp_set_m_n(intel_crtc, M1_N1);
5358 break;
5359 case DRRS_LOW_RR:
5360 intel_dp_set_m_n(intel_crtc, M2_N2);
5361 break;
5362 case DRRS_MAX_RR:
5363 default:
5364 DRM_ERROR("Unsupported refreshrate type\n");
5365 }
5366 } else if (INTEL_INFO(dev)->gen > 6) {
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005367 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305368 val = I915_READ(reg);
Vandana Kannana4c30b12015-02-13 15:33:00 +05305369
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305370 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305371 if (IS_VALLEYVIEW(dev))
5372 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5373 else
5374 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305375 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305376 if (IS_VALLEYVIEW(dev))
5377 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5378 else
5379 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305380 }
5381 I915_WRITE(reg, val);
5382 }
5383
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305384 dev_priv->drrs.refresh_rate_type = index;
5385
5386 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5387}
5388
Vandana Kannanb33a2812015-02-13 15:33:03 +05305389/**
5390 * intel_edp_drrs_enable - init drrs struct if supported
5391 * @intel_dp: DP struct
5392 *
5393 * Initializes frontbuffer_bits and drrs.dp
5394 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305395void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5396{
5397 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5398 struct drm_i915_private *dev_priv = dev->dev_private;
5399 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5400 struct drm_crtc *crtc = dig_port->base.base.crtc;
5401 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5402
5403 if (!intel_crtc->config->has_drrs) {
5404 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5405 return;
5406 }
5407
5408 mutex_lock(&dev_priv->drrs.mutex);
5409 if (WARN_ON(dev_priv->drrs.dp)) {
5410 DRM_ERROR("DRRS already enabled\n");
5411 goto unlock;
5412 }
5413
5414 dev_priv->drrs.busy_frontbuffer_bits = 0;
5415
5416 dev_priv->drrs.dp = intel_dp;
5417
5418unlock:
5419 mutex_unlock(&dev_priv->drrs.mutex);
5420}
5421
Vandana Kannanb33a2812015-02-13 15:33:03 +05305422/**
5423 * intel_edp_drrs_disable - Disable DRRS
5424 * @intel_dp: DP struct
5425 *
5426 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305427void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5428{
5429 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5430 struct drm_i915_private *dev_priv = dev->dev_private;
5431 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5432 struct drm_crtc *crtc = dig_port->base.base.crtc;
5433 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5434
5435 if (!intel_crtc->config->has_drrs)
5436 return;
5437
5438 mutex_lock(&dev_priv->drrs.mutex);
5439 if (!dev_priv->drrs.dp) {
5440 mutex_unlock(&dev_priv->drrs.mutex);
5441 return;
5442 }
5443
5444 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5445 intel_dp_set_drrs_state(dev_priv->dev,
5446 intel_dp->attached_connector->panel.
5447 fixed_mode->vrefresh);
5448
5449 dev_priv->drrs.dp = NULL;
5450 mutex_unlock(&dev_priv->drrs.mutex);
5451
5452 cancel_delayed_work_sync(&dev_priv->drrs.work);
5453}
5454
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305455static void intel_edp_drrs_downclock_work(struct work_struct *work)
5456{
5457 struct drm_i915_private *dev_priv =
5458 container_of(work, typeof(*dev_priv), drrs.work.work);
5459 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305460
Vandana Kannan96178ee2015-01-10 02:25:56 +05305461 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305462
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305463 intel_dp = dev_priv->drrs.dp;
5464
5465 if (!intel_dp)
5466 goto unlock;
5467
5468 /*
5469 * The delayed work can race with an invalidate hence we need to
5470 * recheck.
5471 */
5472
5473 if (dev_priv->drrs.busy_frontbuffer_bits)
5474 goto unlock;
5475
5476 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5477 intel_dp_set_drrs_state(dev_priv->dev,
5478 intel_dp->attached_connector->panel.
5479 downclock_mode->vrefresh);
5480
5481unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305482 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305483}
5484
Vandana Kannanb33a2812015-02-13 15:33:03 +05305485/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305486 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305487 * @dev: DRM device
5488 * @frontbuffer_bits: frontbuffer plane tracking bits
5489 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305490 * This function gets called every time rendering on the given planes starts.
5491 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305492 *
5493 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5494 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305495void intel_edp_drrs_invalidate(struct drm_device *dev,
5496 unsigned frontbuffer_bits)
5497{
5498 struct drm_i915_private *dev_priv = dev->dev_private;
5499 struct drm_crtc *crtc;
5500 enum pipe pipe;
5501
Daniel Vetter9da7d692015-04-09 16:44:15 +02005502 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305503 return;
5504
Daniel Vetter88f933a2015-04-09 16:44:16 +02005505 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305506
Vandana Kannana93fad02015-01-10 02:25:59 +05305507 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005508 if (!dev_priv->drrs.dp) {
5509 mutex_unlock(&dev_priv->drrs.mutex);
5510 return;
5511 }
5512
Vandana Kannana93fad02015-01-10 02:25:59 +05305513 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5514 pipe = to_intel_crtc(crtc)->pipe;
5515
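	/* Only the frontbuffer bits of the pipe driving the DRRS panel matter. */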
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005516 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5517 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5518
Ramalingam C0ddfd202015-06-15 20:50:05 +05305519 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005520 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305521 intel_dp_set_drrs_state(dev_priv->dev,
5522 dev_priv->drrs.dp->attached_connector->panel.
5523 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305524
Vandana Kannana93fad02015-01-10 02:25:59 +05305525 mutex_unlock(&dev_priv->drrs.mutex);
5526}
5527
Vandana Kannanb33a2812015-02-13 15:33:03 +05305528/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305529 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305530 * @dev: DRM device
5531 * @frontbuffer_bits: frontbuffer plane tracking bits
5532 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305533 * This function gets called every time rendering on the given planes has
5534 * completed or a flip on a crtc is completed, so DRRS should be upclocked
5535 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted
5536 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305537 *
5538 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5539 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305540void intel_edp_drrs_flush(struct drm_device *dev,
5541 unsigned frontbuffer_bits)
5542{
5543 struct drm_i915_private *dev_priv = dev->dev_private;
5544 struct drm_crtc *crtc;
5545 enum pipe pipe;
5546
Daniel Vetter9da7d692015-04-09 16:44:15 +02005547 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305548 return;
5549
Daniel Vetter88f933a2015-04-09 16:44:16 +02005550 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305551
Vandana Kannana93fad02015-01-10 02:25:59 +05305552 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005553 if (!dev_priv->drrs.dp) {
5554 mutex_unlock(&dev_priv->drrs.mutex);
5555 return;
5556 }
5557
Vandana Kannana93fad02015-01-10 02:25:59 +05305558 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5559 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005560
5561 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305562 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5563
Ramalingam C0ddfd202015-06-15 20:50:05 +05305564 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005565 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305566 intel_dp_set_drrs_state(dev_priv->dev,
5567 dev_priv->drrs.dp->attached_connector->panel.
5568 fixed_mode->vrefresh);
5569
5570 /*
5571 * flush also means no more activity hence schedule downclock, if all
5572 * other fbs are quiescent too
5573 */
5574 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305575 schedule_delayed_work(&dev_priv->drrs.work,
5576 msecs_to_jiffies(1000));
5577 mutex_unlock(&dev_priv->drrs.mutex);
5578}
5579
Vandana Kannanb33a2812015-02-13 15:33:03 +05305580/**
5581 * DOC: Display Refresh Rate Switching (DRRS)
5582 *
5583 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5584 * which enables switching between low and high refresh rates,
5585 * dynamically, based on the usage scenario. This feature is applicable
5586 * for internal panels.
5587 *
5588 * Indication that the panel supports DRRS is given by the panel EDID, which
5589 * would list multiple refresh rates for one resolution.
5590 *
5591 * DRRS is of 2 types - static and seamless.
5592 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5593 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5594 * Seamless DRRS involves changing RR without any visual effect to the user
5595 * and can be used during normal system usage. This is done by programming
5596 * certain registers.
5597 *
5598 * Support for static/seamless DRRS may be indicated in the VBT based on
5599 * inputs from the panel spec.
5600 *
5601 * DRRS saves power by switching to low RR based on usage scenarios.
5602 *
5603 * eDP DRRS:-
5604 * The implementation is based on frontbuffer tracking implementation.
5605 * When there is a disturbance on the screen triggered by user activity or a
5606 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5607 * When there is no movement on screen, after a timeout of 1 second, a switch
5608 * to low RR is made.
5609 * For integration with frontbuffer tracking code,
5610 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5611 *
5612 * DRRS can be further extended to support other internal panels and also
5613 * the scenario of video playback wherein RR is set based on the rate
5614 * requested by userspace.
5615 */
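
/*
 * Summarizing the interface above: intel_edp_drrs_enable()/_disable() bracket
 * the period during which DRRS may be used, while intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are driven by frontbuffer tracking to force the
 * high refresh rate while the screen is busy and to allow the downclock once
 * it has been idle for a second.
 */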
5616
5617/**
5618 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5619 * @intel_connector: eDP connector
5620 * @fixed_mode: preferred mode of panel
5621 *
5622 * This function is called only once at driver load to initialize basic
5623 * DRRS state.
5624 *
5625 * Returns:
5626 * Downclock mode if panel supports it, else return NULL.
5627 * DRRS support is determined by the presence of downclock mode (apart
5628 * from VBT setting).
5629 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305630static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305631intel_dp_drrs_init(struct intel_connector *intel_connector,
5632 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305633{
5634 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305635 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305636 struct drm_i915_private *dev_priv = dev->dev_private;
5637 struct drm_display_mode *downclock_mode = NULL;
5638
Daniel Vetter9da7d692015-04-09 16:44:15 +02005639 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5640 mutex_init(&dev_priv->drrs.mutex);
5641
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305642 if (INTEL_INFO(dev)->gen <= 6) {
5643		DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
5644 return NULL;
5645 }
5646
5647 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005648 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305649 return NULL;
5650 }
5651
5652 downclock_mode = intel_find_panel_downclock
5653 (dev, fixed_mode, connector);
5654
5655 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305656 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305657 return NULL;
5658 }
5659
Vandana Kannan96178ee2015-01-10 02:25:56 +05305660 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305661
Vandana Kannan96178ee2015-01-10 02:25:56 +05305662 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005663 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305664 return downclock_mode;
5665}
5666
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005667static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005668 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005669{
5670 struct drm_connector *connector = &intel_connector->base;
5671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005672 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5673 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005674 struct drm_i915_private *dev_priv = dev->dev_private;
5675 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305676 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005677 bool has_dpcd;
5678 struct drm_display_mode *scan;
5679 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005680 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005681
5682 if (!is_edp(intel_dp))
5683 return true;
5684
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005685 pps_lock(intel_dp);
5686 intel_edp_panel_vdd_sanitize(intel_dp);
5687 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005688
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005689 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005690 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005691
5692 if (has_dpcd) {
5693 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5694 dev_priv->no_aux_handshake =
5695 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5696 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5697 } else {
5698 /* if this fails, presume the device is a ghost */
5699 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005700 return false;
5701 }
5702
5703 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005704 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005705 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005706 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005707
Daniel Vetter060c8772014-03-21 23:22:35 +01005708 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005709 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005710 if (edid) {
5711 if (drm_add_edid_modes(connector, edid)) {
5712 drm_mode_connector_update_edid_property(connector,
5713 edid);
5714 drm_edid_to_eld(connector, edid);
5715 } else {
5716 kfree(edid);
5717 edid = ERR_PTR(-EINVAL);
5718 }
5719 } else {
5720 edid = ERR_PTR(-ENOENT);
5721 }
5722 intel_connector->edid = edid;
5723
5724 /* prefer fixed mode from EDID if available */
5725 list_for_each_entry(scan, &connector->probed_modes, head) {
5726 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5727 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305728 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305729 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005730 break;
5731 }
5732 }
5733
5734 /* fallback to VBT if available for eDP */
5735 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5736 fixed_mode = drm_mode_duplicate(dev,
5737 dev_priv->vbt.lfp_lvds_vbt_mode);
5738 if (fixed_mode)
5739 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5740 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005741 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005742
Clint Taylor01527b32014-07-07 13:01:46 -07005743 if (IS_VALLEYVIEW(dev)) {
5744 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5745 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005746
5747 /*
5748 * Figure out the current pipe for the initial backlight setup.
5749 * If the current pipe isn't valid, try the PPS pipe, and if that
5750 * fails just assume pipe A.
5751 */
5752 if (IS_CHERRYVIEW(dev))
5753 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5754 else
5755 pipe = PORT_TO_PIPE(intel_dp->DP);
5756
5757 if (pipe != PIPE_A && pipe != PIPE_B)
5758 pipe = intel_dp->pps_pipe;
5759
5760 if (pipe != PIPE_A && pipe != PIPE_B)
5761 pipe = PIPE_A;
5762
5763 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5764 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005765 }
5766
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305767 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula73580fb72014-08-12 17:11:41 +03005768 intel_connector->panel.backlight_power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005769 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005770
5771 return true;
5772}
5773
Paulo Zanoni16c25532013-06-12 17:27:25 -03005774bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005775intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5776 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005777{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005778 struct drm_connector *connector = &intel_connector->base;
5779 struct intel_dp *intel_dp = &intel_dig_port->dp;
5780 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5781 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005782 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005783 enum port port = intel_dig_port->port;
Jani Nikula0b998362014-03-14 16:51:17 +02005784 int type;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005785
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005786 intel_dp->pps_pipe = INVALID_PIPE;
5787
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005788 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005789 if (INTEL_INFO(dev)->gen >= 9)
5790 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5791 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005792 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5793 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5794 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5795 else if (HAS_PCH_SPLIT(dev))
5796 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5797 else
5798 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5799
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005800 if (INTEL_INFO(dev)->gen >= 9)
5801 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5802 else
5803 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005804
Daniel Vetter07679352012-09-06 22:15:42 +02005805 /* Preserve the current hw state. */
5806 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005807 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005808
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005809 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305810 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005811 else
5812 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005813
Imre Deakf7d24902013-05-08 13:14:05 +03005814 /*
5815 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5816 * for DP the encoder type can be set by the caller to
5817 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5818 */
5819 if (type == DRM_MODE_CONNECTOR_eDP)
5820 intel_encoder->type = INTEL_OUTPUT_EDP;
5821
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005822 /* eDP only on port B and/or C on vlv/chv */
5823 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5824 port != PORT_B && port != PORT_C))
5825 return false;
5826
Imre Deake7281ea2013-05-08 13:14:08 +03005827 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5828 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5829 port_name(port));
5830
Adam Jacksonb3295302010-07-16 14:46:28 -04005831 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005832 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5833
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005834 connector->interlace_allowed = true;
5835	connector->doublescan_allowed = false;
Ma Lingf8aed702009-08-24 13:50:24 +08005836
Daniel Vetter66a92782012-07-12 20:08:18 +02005837 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
Daniel Vetter4be73782014-01-17 14:39:48 +01005838 edp_panel_vdd_work);
Zhenyu Wang6251ec02010-01-12 05:38:32 +08005839
Chris Wilsondf0e9242010-09-09 16:20:55 +01005840 intel_connector_attach_encoder(intel_connector, intel_encoder);
Thomas Wood34ea3d32014-05-29 16:57:41 +01005841 drm_connector_register(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005842
Paulo Zanoniaffa9352012-11-23 15:30:39 -02005843 if (HAS_DDI(dev))
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005844 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5845 else
5846 intel_connector->get_hw_state = intel_connector_get_hw_state;
Imre Deak80f65de2014-02-11 17:12:49 +02005847 intel_connector->unregister = intel_dp_connector_unregister;
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005848
Jani Nikula0b998362014-03-14 16:51:17 +02005849 /* Set up the hotplug pin. */
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005850 switch (port) {
5851 case PORT_A:
Egbert Eich1d843f92013-02-25 12:06:49 -05005852 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005853 break;
5854 case PORT_B:
Egbert Eich1d843f92013-02-25 12:06:49 -05005855 intel_encoder->hpd_pin = HPD_PORT_B;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005856 break;
5857 case PORT_C:
Egbert Eich1d843f92013-02-25 12:06:49 -05005858 intel_encoder->hpd_pin = HPD_PORT_C;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005859 break;
5860 case PORT_D:
Egbert Eich1d843f92013-02-25 12:06:49 -05005861 intel_encoder->hpd_pin = HPD_PORT_D;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005862 break;
Xiong Zhang26951ca2015-08-17 15:55:50 +08005863 case PORT_E:
5864 intel_encoder->hpd_pin = HPD_PORT_E;
5865 break;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005866 default:
Damien Lespiauad1c0b12013-03-07 15:30:28 +00005867 BUG();
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08005868 }
5869
Imre Deakdada1a92014-01-29 13:25:41 +02005870 if (is_edp(intel_dp)) {
Ville Syrjälä773538e82014-09-04 14:54:56 +03005871 pps_lock(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005872 intel_dp_init_panel_power_timestamps(intel_dp);
5873 if (IS_VALLEYVIEW(dev))
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005874 vlv_initial_power_sequencer_setup(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005875 else
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005876 intel_dp_init_panel_power_sequencer(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005877 pps_unlock(intel_dp);
Imre Deakdada1a92014-01-29 13:25:41 +02005878 }
Paulo Zanoni0095e6d2013-12-19 14:29:39 -02005879
Jani Nikula9d1a1032014-03-14 16:51:15 +02005880 intel_dp_aux_init(intel_dp, intel_connector);
Dave Airliec1f05262012-08-30 11:06:18 +10005881
Dave Airlie0e32b392014-05-02 14:02:48 +10005882 /* init MST on ports that can support it */
Jani Nikula0c9b3712015-05-18 17:10:01 +03005883 if (HAS_DP_MST(dev) &&
5884 (port == PORT_B || port == PORT_C || port == PORT_D))
5885 intel_dp_mst_encoder_init(intel_dig_port,
5886 intel_connector->base.base.id);
Dave Airlie0e32b392014-05-02 14:02:48 +10005887
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005888 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Dave Airlie4f71d0c2014-06-04 16:02:28 +10005889 drm_dp_aux_unregister(&intel_dp->aux);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005890 if (is_edp(intel_dp)) {
5891 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03005892 /*
5893		 * vdd might still be enabled due to the delayed vdd off.
5894 * Make sure vdd is actually turned off here.
5895 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005896 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01005897 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005898 pps_unlock(intel_dp);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005899 }
Thomas Wood34ea3d32014-05-29 16:57:41 +01005900 drm_connector_unregister(connector);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005901 drm_connector_cleanup(connector);
Paulo Zanoni16c25532013-06-12 17:27:25 -03005902 return false;
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005903 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08005904
Chris Wilsonf6849602010-09-19 09:29:33 +01005905 intel_dp_add_properties(intel_dp, connector);
5906
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005907	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written to
5908 * 0xd. Failure to do so will result in spurious interrupts being
5909 * generated on the port when a cable is not attached.
5910 */
5911 if (IS_G4X(dev) && !IS_GM45(dev)) {
5912 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5913 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5914 }
Paulo Zanoni16c25532013-06-12 17:27:25 -03005915
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005916 i915_debugfs_connector_add(connector);
5917
Paulo Zanoni16c25532013-06-12 17:27:25 -03005918 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005919}
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005920
5921void
5922intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5923{
Dave Airlie13cf5502014-06-18 11:29:35 +10005924 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005925 struct intel_digital_port *intel_dig_port;
5926 struct intel_encoder *intel_encoder;
5927 struct drm_encoder *encoder;
5928 struct intel_connector *intel_connector;
5929
Daniel Vetterb14c5672013-09-19 12:18:32 +02005930 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005931 if (!intel_dig_port)
5932 return;
5933
Ander Conselvan de Oliveira08d9bc92015-04-10 10:59:10 +03005934 intel_connector = intel_connector_alloc();
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005935 if (!intel_connector) {
5936 kfree(intel_dig_port);
5937 return;
5938 }
5939
5940 intel_encoder = &intel_dig_port->base;
5941 encoder = &intel_encoder->base;
5942
5943 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5944 DRM_MODE_ENCODER_TMDS);
5945
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01005946 intel_encoder->compute_config = intel_dp_compute_config;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005947 intel_encoder->disable = intel_disable_dp;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005948 intel_encoder->get_hw_state = intel_dp_get_hw_state;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07005949 intel_encoder->get_config = intel_dp_get_config;
Imre Deak07f9cd02014-08-18 14:42:45 +03005950 intel_encoder->suspend = intel_dp_encoder_suspend;
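	/*
	 * The enable/disable hooks differ per platform: chv/vlv need their
	 * own DPIO PHY and PLL sequencing.
	 */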
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005951 if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä9197c882014-04-09 13:29:05 +03005952 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005953 intel_encoder->pre_enable = chv_pre_enable_dp;
5954 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä580d3812014-04-09 13:29:00 +03005955 intel_encoder->post_disable = chv_post_disable_dp;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005956 } else if (IS_VALLEYVIEW(dev)) {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005957 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005958 intel_encoder->pre_enable = vlv_pre_enable_dp;
5959 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä49277c32014-03-31 18:21:26 +03005960 intel_encoder->post_disable = vlv_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005961 } else {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005962 intel_encoder->pre_enable = g4x_pre_enable_dp;
5963 intel_encoder->enable = g4x_enable_dp;
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03005964 if (INTEL_INFO(dev)->gen >= 5)
5965 intel_encoder->post_disable = ilk_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005966 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005967
Paulo Zanoni174edf12012-10-26 19:05:50 -02005968 intel_dig_port->port = port;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005969 intel_dig_port->dp.output_reg = output_reg;
5970
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005971 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
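	/* On chv, port D can only use pipe C; ports B and C use pipes A/B. */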
Ville Syrjälä882ec382014-04-28 14:07:43 +03005972 if (IS_CHERRYVIEW(dev)) {
5973 if (port == PORT_D)
5974 intel_encoder->crtc_mask = 1 << 2;
5975 else
5976 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5977 } else {
5978 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5979 }
Ville Syrjäläbc079e82014-03-03 16:15:28 +02005980 intel_encoder->cloneable = 0;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005981
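	/* Hook this port into long/short HPD pulse handling from the hotplug irq. */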
Dave Airlie13cf5502014-06-18 11:29:35 +10005982 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
Jani Nikula5fcece82015-05-27 15:03:42 +03005983 dev_priv->hotplug.irq_port[port] = intel_dig_port;
Dave Airlie13cf5502014-06-18 11:29:35 +10005984
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005985 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5986 drm_encoder_cleanup(encoder);
5987 kfree(intel_dig_port);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005988 kfree(intel_connector);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005989 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005990}
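
/*
 * Example (sketch only, not part of the original source): the platform
 * output setup paths register each DP port by passing its control register
 * and port identifier, e.g.
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 *
 * eDP vs. DP handling and MST setup then happen in intel_dp_init_connector().
 */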
Dave Airlie0e32b392014-05-02 14:02:48 +10005991
5992void intel_dp_mst_suspend(struct drm_device *dev)
5993{
5994 struct drm_i915_private *dev_priv = dev->dev_private;
5995 int i;
5996
5997 /* disable MST */
5998 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03005999 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006000 if (!intel_dig_port)
6001 continue;
6002
6003 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6004 if (!intel_dig_port->dp.can_mst)
6005 continue;
6006 if (intel_dig_port->dp.is_mst)
6007 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6008 }
6009 }
6010}
6011
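/*
 * Restore MST state on system resume. If a topology manager cannot be
 * resumed (e.g. the branch device went away while suspended), fall back to
 * the regular MST status check to clean up and re-detect.
 */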
6012void intel_dp_mst_resume(struct drm_device *dev)
6013{
6014 struct drm_i915_private *dev_priv = dev->dev_private;
6015 int i;
6016
6017 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006018 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006019 if (!intel_dig_port)
6020 continue;
6021 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6022 int ret;
6023
6024 if (!intel_dig_port->dp.can_mst)
6025 continue;
6026
6027 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6028 if (ret != 0) {
6029 intel_dp_check_mst_status(&intel_dig_port->dp);
6030 }
6031 }
6032 }
6033}