/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates. Only the fixed
 * link rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
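/*
 * Illustrative check of the fixed-point encoding above (not from the
 * original source): for the 162000 entry, (32 << 22) | 1677722 ==
 * 0x8000000 | 0x19999a == 0x819999a, and for the 270000/540000 entries,
 * 27 << 22 == 0x6c00000, matching the .m2 values in the table.
 */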

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

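/*
 * Completing that worked example (illustrative, not from the original
 * source): intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10
 * == 214200 decakilobits, which fits within the 216000 that one lane at
 * 2.7GHz provides, so 1680x1050R at 18bpp is achievable on a single lane.
 */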
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
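/*
 * Illustrative note (not in the original source): the pack/unpack helpers
 * above treat the first byte as the most significant, e.g.
 * intel_dp_pack_aux((uint8_t []){ 0x01, 0x02, 0x03, 0x04 }, 4) returns
 * 0x01020304, and intel_dp_unpack_aux() reverses that mapping.
 */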
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
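/*
 * Illustrative sanity check (not in the original source): the return
 * values above are in MHz and follow the "hrawclock is 1/4 the FSB
 * frequency" rule, e.g. CLKCFG_FSB_800 (an 800 MHz FSB) yields 200 MHz.
 */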

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even VDD force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based on the hrawclk, and we want it to run at
	 * 2MHz. So take the hrawclk value, divide by 2, and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
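/*
 * Illustrative note (not in the original source): intel_hrawclk() returns
 * MHz, so e.g. a 200 MHz hrawclk gives a divider of 200 / 2 = 100, and
 * 200 MHz divided by 100 yields the desired 2 MHz mentioned above.
 */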
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
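/*
 * Illustrative note (not in the original source): as the txbuf setup below
 * shows, an AUX request header is up to four bytes - the request type plus
 * address bits 19:16, address bits 15:8, address bits 7:0, and (size - 1) -
 * hence HEADER_SIZE is 4, while an address-only transaction omits the
 * length byte and uses the 3-byte BARE_ADDRESS_SIZE.
 */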
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
	   results in a CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
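/*
 * Explanatory note (not in the original source), assuming the standard
 * DPCD encodings DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a and
 * DP_LINK_BW_5_4 = 0x14: (max_link_bw >> 3) + 1 yields 1, 2 or 3, i.e.
 * how many leading entries of default_rates[] the sink can use.
 */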
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301211static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1212{
1213 /* WaDisableHBR2:skl */
1214 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1215 return false;
1216
1217 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1218 (INTEL_INFO(dev)->gen >= 9))
1219 return true;
1220 else
1221 return false;
1222}
1223
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301224static int
Ville Syrjälä1db10e22015-03-12 17:10:32 +02001225intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301226{
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301227 int size;
1228
Sonika Jindal64987fc2015-05-26 17:50:13 +05301229 if (IS_BROXTON(dev)) {
1230 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301231 size = ARRAY_SIZE(bxt_rates);
Sonika Jindal64987fc2015-05-26 17:50:13 +05301232 } else if (IS_SKYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301233 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301234 size = ARRAY_SIZE(skl_rates);
1235 } else {
1236 *source_rates = default_rates;
1237 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301238 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001239
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301240 /* This depends on the fact that 5.4 is last value in the array */
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301241 if (!intel_dp_source_supports_hbr2(dev))
1242 size--;
1243
1244 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301245}
1246
Daniel Vetter0e503382014-07-04 11:26:04 -03001247static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001248intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001249 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001250{
1251 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001252 const struct dp_link_dpll *divisor = NULL;
1253 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001254
1255 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001256 divisor = gen4_dpll;
1257 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001258 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001259 divisor = pch_dpll;
1260 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001261 } else if (IS_CHERRYVIEW(dev)) {
1262 divisor = chv_dpll;
1263 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001264 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001265 divisor = vlv_dpll;
1266 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001267 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001268
1269 if (divisor && count) {
1270 for (i = 0; i < count; i++) {
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001271 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001272 pipe_config->dpll = divisor[i].dpll;
1273 pipe_config->clock_set = true;
1274 break;
1275 }
1276 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001277 }
1278}
1279
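/*
 * Two-pointer walk over two ascending rate arrays, writing the
 * intersection into common_rates and returning its length,
 * e.g. source {162000, 270000, 540000} and sink {162000, 270000}
 * give common {162000, 270000}.
 */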
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001280static int intersect_rates(const int *source_rates, int source_len,
1281 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001282 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301283{
1284 int i = 0, j = 0, k = 0;
1285
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301286 while (i < source_len && j < sink_len) {
1287 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001288 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1289 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001290 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301291 ++k;
1292 ++i;
1293 ++j;
1294 } else if (source_rates[i] < sink_rates[j]) {
1295 ++i;
1296 } else {
1297 ++j;
1298 }
1299 }
1300 return k;
1301}
1302
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001303static int intel_dp_common_rates(struct intel_dp *intel_dp,
1304 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001305{
1306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1307 const int *source_rates, *sink_rates;
1308 int source_len, sink_len;
1309
1310 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1311 source_len = intel_dp_source_rates(dev, &source_rates);
1312
1313 return intersect_rates(source_rates, source_len,
1314 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001315 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001316}
1317
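/*
 * Format an int array as a comma-separated list, truncating quietly
 * once the buffer is full; only used for the debug output below.
 */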
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001318static void snprintf_int_array(char *str, size_t len,
1319 const int *array, int nelem)
1320{
1321 int i;
1322
1323 str[0] = '\0';
1324
1325 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001326 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001327 if (r >= len)
1328 return;
1329 str += r;
1330 len -= r;
1331 }
1332}
1333
1334static void intel_dp_print_rates(struct intel_dp *intel_dp)
1335{
1336 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1337 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001338 int source_len, sink_len, common_len;
1339 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001340 char str[128]; /* FIXME: too big for stack? */
1341
1342 if ((drm_debug & DRM_UT_KMS) == 0)
1343 return;
1344
1345 source_len = intel_dp_source_rates(dev, &source_rates);
1346 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1347 DRM_DEBUG_KMS("source rates: %s\n", str);
1348
1349 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1350 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1351 DRM_DEBUG_KMS("sink rates: %s\n", str);
1352
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001353 common_len = intel_dp_common_rates(intel_dp, common_rates);
1354 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1355 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001356}
1357
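/*
 * Index of 'find' in rates[]. With find == 0 on a zero-initialized
 * array this yields the number of populated entries, which is how
 * intel_dp_max_link_rate() locates the last (highest) rate.
 */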
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001358static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301359{
1360 int i = 0;
1361
1362 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1363 if (find == rates[i])
1364 break;
1365
1366 return i;
1367}
1368
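/*
 * Highest link rate common to source and sink; falls back to RBR
 * (162000) if the intersection is unexpectedly empty.
 */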
Ville Syrjälä50fec212015-03-12 17:10:34 +02001369int
1370intel_dp_max_link_rate(struct intel_dp *intel_dp)
1371{
1372 int rates[DP_MAX_SUPPORTED_RATES] = {};
1373 int len;
1374
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001375 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001376 if (WARN_ON(len <= 0))
1377 return 162000;
1378
1379 return rates[rate_to_index(0, rates) - 1];
1380}
1381
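/*
 * Index into the sink's own rate table, as programmed into the
 * eDP 1.4 LINK_RATE_SET field during link training.
 */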
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001382int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1383{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001384 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001385}
1386
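/*
 * Pick a link configuration for the mode: walk bpp downwards from the
 * pipe's value (optionally clamped to the VBT value for eDP), and for
 * each bpp try every common link rate and lane count until the
 * required mode rate fits into the available link bandwidth.
 */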
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001387bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001388intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001389 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001390{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001391 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001392 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001393 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001394 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001395 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001396 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001397 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001398 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001399 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001400 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001401	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001402 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301403 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001404 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001405 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001406 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1407 int common_len;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301408
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001409 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301410
1411 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001412 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301413
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001414 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001415
Imre Deakbc7d38a2013-05-16 14:40:36 +03001416 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001417 pipe_config->has_pch_encoder = true;
1418
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001419 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001420 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001421 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001422
Jani Nikuladd06f902012-10-19 14:51:50 +03001423 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1424 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1425 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001426
1427 if (INTEL_INFO(dev)->gen >= 9) {
1428 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001429 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001430 if (ret)
 1431			return false;
1432 }
1433
Jesse Barnes2dd24552013-04-25 12:55:01 -07001434 if (!HAS_PCH_SPLIT(dev))
1435 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1436 intel_connector->panel.fitting_mode);
1437 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001438 intel_pch_panel_fitting(intel_crtc, pipe_config,
1439 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001440 }
1441
Daniel Vettercb1793c2012-06-04 18:39:21 +02001442 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001443 return false;
1444
Daniel Vetter083f9562012-04-20 20:23:49 +02001445 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301446 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001447 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001448 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001449
Daniel Vetter36008362013-03-27 00:44:59 +01001450 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1451 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001452 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001453 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301454
 1455		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1456 if (intel_connector->base.display_info.bpc == 0 &&
1457 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001458 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1459 dev_priv->vbt.edp_bpp);
1460 bpp = dev_priv->vbt.edp_bpp;
1461 }
1462
Jani Nikula344c5bb2014-09-09 11:25:13 +03001463 /*
1464 * Use the maximum clock and number of lanes the eDP panel
 1465		 * advertises being capable of. The panels are generally
1466 * designed to support only a single clock and lane
1467 * configuration, and typically these values correspond to the
1468 * native resolution of the panel.
1469 */
1470 min_lane_count = max_lane_count;
1471 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001472 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001473
Daniel Vetter36008362013-03-27 00:44:59 +01001474 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001475 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1476 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001477
Dave Airliec6930992014-07-14 11:04:39 +10001478 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301479 for (lane_count = min_lane_count;
1480 lane_count <= max_lane_count;
1481 lane_count <<= 1) {
1482
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001483 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001484 link_avail = intel_dp_max_data_rate(link_clock,
1485 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001486
Daniel Vetter36008362013-03-27 00:44:59 +01001487 if (mode_rate <= link_avail) {
1488 goto found;
1489 }
1490 }
1491 }
1492 }
1493
1494 return false;
1495
1496found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001497 if (intel_dp->color_range_auto) {
1498 /*
1499 * See:
1500 * CEA-861-E - 5.1 Default Encoding Parameters
1501 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1502 */
Thierry Reding18316c82012-12-20 15:41:44 +01001503 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001504 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1505 else
1506 intel_dp->color_range = 0;
1507 }
1508
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001509 if (intel_dp->color_range)
Daniel Vetter50f3b012013-03-27 00:44:56 +01001510 pipe_config->limited_color_range = true;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001511
Daniel Vetter36008362013-03-27 00:44:59 +01001512 intel_dp->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301513
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001514 if (intel_dp->num_sink_rates) {
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001515 intel_dp->link_bw = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301516 intel_dp->rate_select =
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001517 intel_dp_rate_select(intel_dp, common_rates[clock]);
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001518 } else {
1519 intel_dp->link_bw =
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001520 drm_dp_link_rate_to_bw_code(common_rates[clock]);
Ville Syrjäläbc27b7d2015-03-12 17:10:35 +02001521 intel_dp->rate_select = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301522 }
1523
Daniel Vetter657445f2013-05-04 10:09:18 +02001524 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001525 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001526
Daniel Vetter36008362013-03-27 00:44:59 +01001527 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1528 intel_dp->link_bw, intel_dp->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001529 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001530 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1531 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001532
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001533 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001534 adjusted_mode->crtc_clock,
1535 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001536 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001537
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301538 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301539 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001540 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301541 intel_link_compute_m_n(bpp, lane_count,
1542 intel_connector->panel.downclock_mode->clock,
1543 pipe_config->port_clock,
1544 &pipe_config->dp_m2_n2);
1545 }
1546
Damien Lespiau5416d872014-11-14 17:24:33 +00001547 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001548 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301549 else if (IS_BROXTON(dev))
1550 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001551 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001552 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001553 else
Ville Syrjälä7e6313a2015-08-11 20:21:46 +03001554 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001555
Daniel Vetter36008362013-03-27 00:44:59 +01001556 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001557}
1558
Daniel Vetter7c62a162013-06-01 17:16:20 +02001559static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001560{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001561 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1562 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1563 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001564 struct drm_i915_private *dev_priv = dev->dev_private;
1565 u32 dpa_ctl;
1566
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001567 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1568 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001569 dpa_ctl = I915_READ(DP_A);
1570 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1571
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001572 if (crtc->config->port_clock == 162000) {
Daniel Vetter1ce17032012-11-29 15:59:32 +01001573		/* For a long time we've carried around an ILK-DevA w/a for the
1574 * 160MHz clock. If we're really unlucky, it's still required.
1575 */
1576 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
Daniel Vetterea9b6002012-11-29 15:59:31 +01001577 dpa_ctl |= DP_PLL_FREQ_160MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001578 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001579 } else {
1580 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001581 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001582 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001583
Daniel Vetterea9b6002012-11-29 15:59:31 +01001584 I915_WRITE(DP_A, dpa_ctl);
1585
1586 POSTING_READ(DP_A);
1587 udelay(500);
1588}
1589
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001590static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001591{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001592 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001593 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001594 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001595 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001596 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001597 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001598
Keith Packard417e8222011-11-01 19:54:11 -07001599 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001600 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001601 *
1602 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001603 * SNB CPU
1604 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001605 * CPT PCH
1606 *
1607 * IBX PCH and CPU are the same for almost everything,
1608 * except that the CPU DP PLL is configured in this
1609 * register
1610 *
1611 * CPT PCH is quite different, having many bits moved
1612 * to the TRANS_DP_CTL register instead. That
1613 * configuration happens (oddly) in ironlake_pch_enable
1614 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001615
Keith Packard417e8222011-11-01 19:54:11 -07001616 /* Preserve the BIOS-computed detected bit. This is
1617 * supposed to be read-only.
1618 */
1619 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001620
Keith Packard417e8222011-11-01 19:54:11 -07001621 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001622 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Daniel Vetter17aa6be2013-04-30 14:01:40 +02001623 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001624
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001625 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001626 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001627
Keith Packard417e8222011-11-01 19:54:11 -07001628 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001629
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001630 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001631 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1632 intel_dp->DP |= DP_SYNC_HS_HIGH;
1633 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1634 intel_dp->DP |= DP_SYNC_VS_HIGH;
1635 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1636
Jani Nikula6aba5b62013-10-04 15:08:10 +03001637 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001638 intel_dp->DP |= DP_ENHANCED_FRAMING;
1639
Daniel Vetter7c62a162013-06-01 17:16:20 +02001640 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001641 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001642 u32 trans_dp;
1643
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001644 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001645
1646 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1647 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1648 trans_dp |= TRANS_DP_ENH_FRAMING;
1649 else
1650 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1651 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001652 } else {
Jesse Barnesb2634012013-03-28 09:55:40 -07001653 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001654 intel_dp->DP |= intel_dp->color_range;
Keith Packard417e8222011-11-01 19:54:11 -07001655
1656 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1657 intel_dp->DP |= DP_SYNC_HS_HIGH;
1658 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1659 intel_dp->DP |= DP_SYNC_VS_HIGH;
1660 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1661
Jani Nikula6aba5b62013-10-04 15:08:10 +03001662 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001663 intel_dp->DP |= DP_ENHANCED_FRAMING;
1664
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001665 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001666 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001667 else if (crtc->pipe == PIPE_B)
1668 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001669 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001670}
1671
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001672#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1673#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001674
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001675#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1676#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001677
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001678#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1679#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001680
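/*
 * Poll PP_STATUS until (status & mask) == value; gives up with an
 * error message after roughly five seconds.
 */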
Daniel Vetter4be73782014-01-17 14:39:48 +01001681static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001682 u32 mask,
1683 u32 value)
1684{
Paulo Zanoni30add222012-10-26 19:05:45 -02001685 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001686 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001687 u32 pp_stat_reg, pp_ctrl_reg;
1688
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001689 lockdep_assert_held(&dev_priv->pps_mutex);
1690
Jani Nikulabf13e812013-09-06 07:40:05 +03001691 pp_stat_reg = _pp_stat_reg(intel_dp);
1692 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001693
1694 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001695 mask, value,
1696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001698
Jesse Barnes453c5422013-03-28 09:55:41 -07001699 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001700 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001701 I915_READ(pp_stat_reg),
1702 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001703 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001704
1705 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001706}
1707
Daniel Vetter4be73782014-01-17 14:39:48 +01001708static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001709{
1710 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001711 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001712}
1713
Daniel Vetter4be73782014-01-17 14:39:48 +01001714static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001715{
Keith Packardbd943152011-09-18 23:09:52 -07001716 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001717 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001718}
Keith Packardbd943152011-09-18 23:09:52 -07001719
Daniel Vetter4be73782014-01-17 14:39:48 +01001720static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001721{
1722 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001723
 1724	/* When we disable the VDD override bit last, we have to do the manual
1725 * wait. */
1726 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1727 intel_dp->panel_power_cycle_delay);
1728
Daniel Vetter4be73782014-01-17 14:39:48 +01001729 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001730}
Keith Packardbd943152011-09-18 23:09:52 -07001731
Daniel Vetter4be73782014-01-17 14:39:48 +01001732static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001733{
1734 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1735 intel_dp->backlight_on_delay);
1736}
1737
Daniel Vetter4be73782014-01-17 14:39:48 +01001738static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001739{
1740 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1741 intel_dp->backlight_off_delay);
1742}
Keith Packard99ea7122011-11-01 19:57:50 -07001743
Keith Packard832dd3c2011-11-01 19:34:06 -07001744/* Read the current pp_control value, unlocking the register if it
1745 * is locked
1746 */
1747
Jesse Barnes453c5422013-03-28 09:55:41 -07001748static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001749{
Jesse Barnes453c5422013-03-28 09:55:41 -07001750 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1751 struct drm_i915_private *dev_priv = dev->dev_private;
1752 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001753
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001754 lockdep_assert_held(&dev_priv->pps_mutex);
1755
Jani Nikulabf13e812013-09-06 07:40:05 +03001756 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301757 if (!IS_BROXTON(dev)) {
1758 control &= ~PANEL_UNLOCK_MASK;
1759 control |= PANEL_UNLOCK_REGS;
1760 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001761 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001762}
1763
Ville Syrjälä951468f2014-09-04 14:55:31 +03001764/*
1765 * Must be paired with edp_panel_vdd_off().
1766 * Must hold pps_mutex around the whole on/off sequence.
1767 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1768 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001769static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001770{
Paulo Zanoni30add222012-10-26 19:05:45 -02001771 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001772 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1773 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001774 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001775 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001776 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001777 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001778 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001779
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001780 lockdep_assert_held(&dev_priv->pps_mutex);
1781
Keith Packard97af61f572011-09-28 16:23:51 -07001782 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001783 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001784
Egbert Eich2c623c12014-11-25 12:54:57 +01001785 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001786 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001787
Daniel Vetter4be73782014-01-17 14:39:48 +01001788 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001789 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001790
Imre Deak4e6e1a52014-03-27 17:45:11 +02001791 power_domain = intel_display_port_power_domain(intel_encoder);
1792 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001793
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001794 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1795 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001796
Daniel Vetter4be73782014-01-17 14:39:48 +01001797 if (!edp_have_panel_power(intel_dp))
1798 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001799
Jesse Barnes453c5422013-03-28 09:55:41 -07001800 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001801 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001802
Jani Nikulabf13e812013-09-06 07:40:05 +03001803 pp_stat_reg = _pp_stat_reg(intel_dp);
1804 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001805
1806 I915_WRITE(pp_ctrl_reg, pp);
1807 POSTING_READ(pp_ctrl_reg);
1808 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1809 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001810 /*
1811 * If the panel wasn't on, delay before accessing aux channel
1812 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001813 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001814 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1815 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001816 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001817 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001818
1819 return need_to_disable;
1820}
1821
Ville Syrjälä951468f2014-09-04 14:55:31 +03001822/*
1823 * Must be paired with intel_edp_panel_vdd_off() or
1824 * intel_edp_panel_off().
1825 * Nested calls to these functions are not allowed since
1826 * we drop the lock. Caller must use some higher level
1827 * locking to prevent nested calls from other threads.
1828 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001829void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001830{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001831 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001832
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001833 if (!is_edp(intel_dp))
1834 return;
1835
Ville Syrjälä773538e82014-09-04 14:54:56 +03001836 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001837 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001838 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001839
Rob Clarke2c719b2014-12-15 13:56:32 -05001840 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001841 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001842}
1843
Daniel Vetter4be73782014-01-17 14:39:48 +01001844static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001845{
Paulo Zanoni30add222012-10-26 19:05:45 -02001846 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001847 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001848 struct intel_digital_port *intel_dig_port =
1849 dp_to_dig_port(intel_dp);
1850 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1851 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001852 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001853 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001854
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001855 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001856
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001857 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001858
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001859 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001860 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001861
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001862 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1863 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001864
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001865 pp = ironlake_get_pp_control(intel_dp);
1866 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001867
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001868 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1869 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001870
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001871 I915_WRITE(pp_ctrl_reg, pp);
1872 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001873
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001874 /* Make sure sequencer is idle before allowing subsequent activity */
1875 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1876 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001877
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001878 if ((pp & POWER_TARGET_ON) == 0)
1879 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001880
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001881 power_domain = intel_display_port_power_domain(intel_encoder);
1882 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001883}
1884
Daniel Vetter4be73782014-01-17 14:39:48 +01001885static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001886{
1887 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1888 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001889
Ville Syrjälä773538e82014-09-04 14:54:56 +03001890 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001891 if (!intel_dp->want_panel_vdd)
1892 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001893 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001894}
1895
Imre Deakaba86892014-07-30 15:57:31 +03001896static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1897{
1898 unsigned long delay;
1899
1900 /*
1901 * Queue the timer to fire a long time from now (relative to the power
1902 * down delay) to keep the panel power up across a sequence of
1903 * operations.
1904 */
1905 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1906 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1907}
1908
Ville Syrjälä951468f2014-09-04 14:55:31 +03001909/*
1910 * Must be paired with edp_panel_vdd_on().
1911 * Must hold pps_mutex around the whole on/off sequence.
1912 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1913 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001914static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001915{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001916 struct drm_i915_private *dev_priv =
1917 intel_dp_to_dev(intel_dp)->dev_private;
1918
1919 lockdep_assert_held(&dev_priv->pps_mutex);
1920
Keith Packard97af61f572011-09-28 16:23:51 -07001921 if (!is_edp(intel_dp))
1922 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001923
Rob Clarke2c719b2014-12-15 13:56:32 -05001924 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001925 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001926
Keith Packardbd943152011-09-18 23:09:52 -07001927 intel_dp->want_panel_vdd = false;
1928
Imre Deakaba86892014-07-30 15:57:31 +03001929 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001930 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001931 else
1932 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001933}
1934
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001935static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001936{
Paulo Zanoni30add222012-10-26 19:05:45 -02001937 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001938 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001939 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001940 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001941
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001942 lockdep_assert_held(&dev_priv->pps_mutex);
1943
Keith Packard97af61f572011-09-28 16:23:51 -07001944 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001945 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001946
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001947 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1948 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001949
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001950 if (WARN(edp_have_panel_power(intel_dp),
1951 "eDP port %c panel power already on\n",
1952 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001953 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001954
Daniel Vetter4be73782014-01-17 14:39:48 +01001955 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001956
Jani Nikulabf13e812013-09-06 07:40:05 +03001957 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001958 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001959 if (IS_GEN5(dev)) {
1960 /* ILK workaround: disable reset around power sequence */
1961 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001962 I915_WRITE(pp_ctrl_reg, pp);
1963 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001964 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001965
Keith Packard1c0ae802011-09-19 13:59:29 -07001966 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001967 if (!IS_GEN5(dev))
1968 pp |= PANEL_POWER_RESET;
1969
Jesse Barnes453c5422013-03-28 09:55:41 -07001970 I915_WRITE(pp_ctrl_reg, pp);
1971 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001972
Daniel Vetter4be73782014-01-17 14:39:48 +01001973 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001974 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001975
Keith Packard05ce1a42011-09-29 16:33:01 -07001976 if (IS_GEN5(dev)) {
1977 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001980 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001981}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001982
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001983void intel_edp_panel_on(struct intel_dp *intel_dp)
1984{
1985 if (!is_edp(intel_dp))
1986 return;
1987
1988 pps_lock(intel_dp);
1989 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001990 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001991}
1992
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001993
1994static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001995{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001996 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1997 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001998 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001999 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02002000 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002001 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002002 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002003
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002004 lockdep_assert_held(&dev_priv->pps_mutex);
2005
Keith Packard97af61f572011-09-28 16:23:51 -07002006 if (!is_edp(intel_dp))
2007 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002008
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002009 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2010 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002011
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002012 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2013 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002014
Jesse Barnes453c5422013-03-28 09:55:41 -07002015 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002016	/* We need to switch off panel power _and_ force vdd, because otherwise some
2017 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002018 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2019 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002020
Jani Nikulabf13e812013-09-06 07:40:05 +03002021 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002022
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002023 intel_dp->want_panel_vdd = false;
2024
Jesse Barnes453c5422013-03-28 09:55:41 -07002025 I915_WRITE(pp_ctrl_reg, pp);
2026 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002027
Paulo Zanonidce56b32013-12-19 14:29:40 -02002028 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002029 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002030
2031 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002032 power_domain = intel_display_port_power_domain(intel_encoder);
2033 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002034}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002035
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002036void intel_edp_panel_off(struct intel_dp *intel_dp)
2037{
2038 if (!is_edp(intel_dp))
2039 return;
2040
2041 pps_lock(intel_dp);
2042 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002043 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002044}
2045
Jani Nikula1250d102014-08-12 17:11:39 +03002046/* Enable backlight in the panel power control. */
2047static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002048{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002049 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2050 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002051 struct drm_i915_private *dev_priv = dev->dev_private;
2052 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002053 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002054
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002055 /*
2056 * If we enable the backlight right away following a panel power
2057 * on, we may see slight flicker as the panel syncs with the eDP
2058 * link. So delay a bit to make sure the image is solid before
2059 * allowing it to appear.
2060 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002061 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002062
Ville Syrjälä773538e82014-09-04 14:54:56 +03002063 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002064
Jesse Barnes453c5422013-03-28 09:55:41 -07002065 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002066 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002067
Jani Nikulabf13e812013-09-06 07:40:05 +03002068 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002069
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002072
Ville Syrjälä773538e82014-09-04 14:54:56 +03002073 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002074}
2075
Jani Nikula1250d102014-08-12 17:11:39 +03002076/* Enable backlight PWM and backlight PP control. */
2077void intel_edp_backlight_on(struct intel_dp *intel_dp)
2078{
2079 if (!is_edp(intel_dp))
2080 return;
2081
2082 DRM_DEBUG_KMS("\n");
2083
2084 intel_panel_enable_backlight(intel_dp->attached_connector);
2085 _intel_edp_backlight_on(intel_dp);
2086}
2087
2088/* Disable backlight in the panel power control. */
2089static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002090{
Paulo Zanoni30add222012-10-26 19:05:45 -02002091 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002094 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002095
Keith Packardf01eca22011-09-28 16:48:10 -07002096 if (!is_edp(intel_dp))
2097 return;
2098
Ville Syrjälä773538e82014-09-04 14:54:56 +03002099 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002100
Jesse Barnes453c5422013-03-28 09:55:41 -07002101 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002102 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002103
Jani Nikulabf13e812013-09-06 07:40:05 +03002104 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002105
2106 I915_WRITE(pp_ctrl_reg, pp);
2107 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002108
Ville Syrjälä773538e82014-09-04 14:54:56 +03002109 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002110
Paulo Zanonidce56b32013-12-19 14:29:40 -02002111 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002112 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002113}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002114
Jani Nikula1250d102014-08-12 17:11:39 +03002115/* Disable backlight PP control and backlight PWM. */
2116void intel_edp_backlight_off(struct intel_dp *intel_dp)
2117{
2118 if (!is_edp(intel_dp))
2119 return;
2120
2121 DRM_DEBUG_KMS("\n");
2122
2123 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002124 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002125}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002126
Jani Nikula73580fb72014-08-12 17:11:41 +03002127/*
2128 * Hook for controlling the panel power control backlight through the bl_power
2129 * sysfs attribute. Take care to handle multiple calls.
2130 */
2131static void intel_edp_backlight_power(struct intel_connector *connector,
2132 bool enable)
2133{
2134 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002135 bool is_enabled;
2136
Ville Syrjälä773538e82014-09-04 14:54:56 +03002137 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002138 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002139 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002140
2141 if (is_enabled == enable)
2142 return;
2143
Jani Nikula23ba9372014-08-27 14:08:43 +03002144 DRM_DEBUG_KMS("panel power control backlight %s\n",
2145 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002146
2147 if (enable)
2148 _intel_edp_backlight_on(intel_dp);
2149 else
2150 _intel_edp_backlight_off(intel_dp);
2151}
2152
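/*
 * CPU eDP PLL enable; the pipe must already be disabled. The matching
 * disable path is ironlake_edp_pll_off() below.
 */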
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002153static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002154{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2156 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2157 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002158 struct drm_i915_private *dev_priv = dev->dev_private;
2159 u32 dpa_ctl;
2160
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002161 assert_pipe_disabled(dev_priv,
2162 to_intel_crtc(crtc)->pipe);
2163
Jesse Barnesd240f202010-08-13 15:43:26 -07002164 DRM_DEBUG_KMS("\n");
2165 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002166 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2167 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2168
2169 /* We don't adjust intel_dp->DP while tearing down the link, to
2170 * facilitate link retraining (e.g. after hotplug). Hence clear all
2171 * enable bits here to ensure that we don't enable too much. */
2172 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2173 intel_dp->DP |= DP_PLL_ENABLE;
2174 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002175 POSTING_READ(DP_A);
2176 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002177}
2178
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002179static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002180{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002181 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2182 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2183 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 u32 dpa_ctl;
2186
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002187 assert_pipe_disabled(dev_priv,
2188 to_intel_crtc(crtc)->pipe);
2189
Jesse Barnesd240f202010-08-13 15:43:26 -07002190 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002191 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2192 "dp pll off, should be on\n");
2193 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2194
2195 /* We can't rely on the value tracked for the DP register in
2196 * intel_dp->DP because link_down must not change that (otherwise link
 2197	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002198 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002199 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002200 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002201 udelay(200);
2202}
2203
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002204/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002205void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002206{
2207 int ret, i;
2208
2209 /* Should have a valid DPCD by this point */
2210 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2211 return;
2212
2213 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002214 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2215 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002216 } else {
2217 /*
 2218		 * When turning on, retry a few times, sleeping 1 ms between
 2219		 * attempts, to give the sink time to wake up.
2220 */
2221 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002222 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2223 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002224 if (ret == 1)
2225 break;
2226 msleep(1);
2227 }
2228 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002229
2230 if (ret != 1)
2231 DRM_DEBUG_KMS("failed to %s sink power state\n",
2232 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002233}
2234
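/*
 * Hardware readout: report whether the DP port is enabled and, if so,
 * which pipe it is driving, after checking that its power domain is up.
 */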
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002235static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2236 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002237{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002238 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002239 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002240 struct drm_device *dev = encoder->base.dev;
2241 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002242 enum intel_display_power_domain power_domain;
2243 u32 tmp;
2244
2245 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002246 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002247 return false;
2248
2249 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002250
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002251 if (!(tmp & DP_PORT_EN))
2252 return false;
2253
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002254 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002255 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002256 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002257 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002258
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002259 for_each_pipe(dev_priv, p) {
2260 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2261 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2262 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002263 return true;
2264 }
2265 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002266
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002267 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2268 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002269 } else if (IS_CHERRYVIEW(dev)) {
2270 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2271 } else {
2272 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002273 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002274
2275 return true;
2276}
2277
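/*
 * Hardware state readout for the pipe config: sync polarity, color
 * range, audio, M/N values and port clock, plus an eDP bpp fixup for
 * VBTs that under-report the BIOS-programmed link parameters.
 */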
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002278static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002279 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002280{
2281 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002282 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002283 struct drm_device *dev = encoder->base.dev;
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 enum port port = dp_to_dig_port(intel_dp)->port;
2286 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002287 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002288
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002289 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002290
2291 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002292
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002293 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Xiong Zhang63000ef2013-06-28 12:59:06 +08002294 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2295 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2296 flags |= DRM_MODE_FLAG_PHSYNC;
2297 else
2298 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002299
Xiong Zhang63000ef2013-06-28 12:59:06 +08002300 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2301 flags |= DRM_MODE_FLAG_PVSYNC;
2302 else
2303 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002304 } else {
2305 if (tmp & DP_SYNC_HS_HIGH)
2306 flags |= DRM_MODE_FLAG_PHSYNC;
2307 else
2308 flags |= DRM_MODE_FLAG_NHSYNC;
2309
2310 if (tmp & DP_SYNC_VS_HIGH)
2311 flags |= DRM_MODE_FLAG_PVSYNC;
2312 else
2313 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002314 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002315
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002316 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002317
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002318 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2319 tmp & DP_COLOR_RANGE_16_235)
2320 pipe_config->limited_color_range = true;
2321
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002322 pipe_config->has_dp_encoder = true;
2323
2324 intel_dp_get_m_n(crtc, pipe_config);
2325
Ville Syrjälä18442d02013-09-13 16:00:08 +03002326 if (port == PORT_A) {
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002327 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2328 pipe_config->port_clock = 162000;
2329 else
2330 pipe_config->port_clock = 270000;
2331 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002332
2333 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2334 &pipe_config->dp_m_n);
2335
2336 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2337 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2338
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002339 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002340
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002341 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2342 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2343 /*
2344 * This is a big fat ugly hack.
2345 *
2346	 * Some machines in UEFI boot mode provide us with a VBT that has 18
2347	 * bpp and a 1.62 GHz link bandwidth for eDP, which for unknown
2348	 * reasons we fail to light up. Yet the same BIOS boots up with
2349 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2350 * max, not what it tells us to use.
2351 *
2352 * Note: This will still be broken if the eDP panel is not lit
2353 * up by the BIOS, and thus we can't get the mode at module
2354 * load.
2355 */
2356 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2357 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2358 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2359 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002360}
2361
Daniel Vettere8cb4552012-07-01 13:05:48 +02002362static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002363{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002364 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002365 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002366 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2367
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002368 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002369 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002370
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002371 if (HAS_PSR(dev) && !HAS_DDI(dev))
2372 intel_psr_disable(intel_dp);
2373
Daniel Vetter6cb49832012-05-20 17:14:50 +02002374 /* Make sure the panel is off before trying to change the mode. But also
2375 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002376 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002377 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002378 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002379 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002380
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002381 /* disable the port before the pipe on g4x */
2382 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002383 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002384}
2385
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002386static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002387{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002388 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002389 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002390
Ville Syrjälä49277c32014-03-31 18:21:26 +03002391 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002392 if (port == PORT_A)
2393 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002394}
2395
2396static void vlv_post_disable_dp(struct intel_encoder *encoder)
2397{
2398 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2399
2400 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002401}
2402
Ville Syrjälä580d3812014-04-09 13:29:00 +03002403static void chv_post_disable_dp(struct intel_encoder *encoder)
2404{
2405 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2406 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2407 struct drm_device *dev = encoder->base.dev;
2408 struct drm_i915_private *dev_priv = dev->dev_private;
2409 struct intel_crtc *intel_crtc =
2410 to_intel_crtc(encoder->base.crtc);
2411 enum dpio_channel ch = vlv_dport_to_channel(dport);
2412 enum pipe pipe = intel_crtc->pipe;
2413 u32 val;
2414
2415 intel_dp_link_down(intel_dp);
2416
Ville Syrjäläa5805162015-05-26 20:42:30 +03002417 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002418
2419 /* Propagate soft reset to data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002420 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002421 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002422 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002423
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002424 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2425 val |= CHV_PCS_REQ_SOFTRESET_EN;
2426 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2427
2428 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä580d3812014-04-09 13:29:00 +03002429 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002430 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2431
2432 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2433 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2434 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002435
Ville Syrjäläa5805162015-05-26 20:42:30 +03002436 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002437}
2438
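/*
 * Translate a DP training pattern (1/2/3 or "disable") into the
 * platform-specific bits: DP_TP_CTL on DDI platforms, the _CPT
 * variants on gen7 port A and CPT PCH ports, and the plain DP port
 * bits (with a CHV flavour for pattern 3) everywhere else. On non-DDI
 * platforms only *DP is updated; the caller writes it out.
 */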
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002439static void
2440_intel_dp_set_link_train(struct intel_dp *intel_dp,
2441 uint32_t *DP,
2442 uint8_t dp_train_pat)
2443{
2444 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2445 struct drm_device *dev = intel_dig_port->base.base.dev;
2446 struct drm_i915_private *dev_priv = dev->dev_private;
2447 enum port port = intel_dig_port->port;
2448
2449 if (HAS_DDI(dev)) {
2450 uint32_t temp = I915_READ(DP_TP_CTL(port));
2451
2452 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2453 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2454 else
2455 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2456
2457 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2458 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2459 case DP_TRAINING_PATTERN_DISABLE:
2460 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2461
2462 break;
2463 case DP_TRAINING_PATTERN_1:
2464 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2465 break;
2466 case DP_TRAINING_PATTERN_2:
2467 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2468 break;
2469 case DP_TRAINING_PATTERN_3:
2470 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2471 break;
2472 }
2473 I915_WRITE(DP_TP_CTL(port), temp);
2474
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002475 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2476 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002477 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2478
2479 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2480 case DP_TRAINING_PATTERN_DISABLE:
2481 *DP |= DP_LINK_TRAIN_OFF_CPT;
2482 break;
2483 case DP_TRAINING_PATTERN_1:
2484 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2485 break;
2486 case DP_TRAINING_PATTERN_2:
2487 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2488 break;
2489 case DP_TRAINING_PATTERN_3:
2490 DRM_ERROR("DP training pattern 3 not supported\n");
2491 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2492 break;
2493 }
2494
2495 } else {
2496 if (IS_CHERRYVIEW(dev))
2497 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2498 else
2499 *DP &= ~DP_LINK_TRAIN_MASK;
2500
2501 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2502 case DP_TRAINING_PATTERN_DISABLE:
2503 *DP |= DP_LINK_TRAIN_OFF;
2504 break;
2505 case DP_TRAINING_PATTERN_1:
2506 *DP |= DP_LINK_TRAIN_PAT_1;
2507 break;
2508 case DP_TRAINING_PATTERN_2:
2509 *DP |= DP_LINK_TRAIN_PAT_2;
2510 break;
2511 case DP_TRAINING_PATTERN_3:
2512 if (IS_CHERRYVIEW(dev)) {
2513 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2514 } else {
2515 DRM_ERROR("DP training pattern 3 not supported\n");
2516 *DP |= DP_LINK_TRAIN_PAT_2;
2517 }
2518 break;
2519 }
2520 }
2521}
2522
2523static void intel_dp_enable_port(struct intel_dp *intel_dp)
2524{
2525 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2526 struct drm_i915_private *dev_priv = dev->dev_private;
2527
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002528 /* enable with pattern 1 (as per spec) */
2529 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2530 DP_TRAINING_PATTERN_1);
2531
2532 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2533 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002534
2535 /*
2536 * Magic for VLV/CHV. We _must_ first set up the register
2537 * without actually enabling the port, and then do another
2538 * write to enable the port. Otherwise link training will
2539 * fail when the power sequencer is freshly used for this port.
2540 */
2541 intel_dp->DP |= DP_PORT_EN;
2542
2543 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2544 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002545}
2546
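/*
 * Full enable sequence, roughly: claim and initialize the panel power
 * sequencer on VLV/CHV, enable the port with training pattern 1 set up,
 * run the eDP panel power-on dance under the pps lock, wait for the PHY
 * to become ready on VLV/CHV, then link train and finally enable audio
 * if the mode carries it.
 */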
Daniel Vettere8cb4552012-07-01 13:05:48 +02002547static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002548{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002549 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2550 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002551 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002552 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002553 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002554 unsigned int lane_mask = 0x0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002555
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002556 if (WARN_ON(dp_reg & DP_PORT_EN))
2557 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002558
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002559 pps_lock(intel_dp);
2560
2561 if (IS_VALLEYVIEW(dev))
2562 vlv_init_panel_power_sequencer(intel_dp);
2563
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002564 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002565
2566 edp_panel_vdd_on(intel_dp);
2567 edp_panel_on(intel_dp);
2568 edp_panel_vdd_off(intel_dp, true);
2569
2570 pps_unlock(intel_dp);
2571
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002572 if (IS_VALLEYVIEW(dev))
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002573 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2574 lane_mask);
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002575
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002576 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2577 intel_dp_start_link_train(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002578 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002579 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002580
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002581 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002582 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2583 pipe_name(crtc->pipe));
2584 intel_audio_codec_enable(encoder);
2585 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002586}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002587
Jani Nikulaecff4f32013-09-06 07:38:29 +03002588static void g4x_enable_dp(struct intel_encoder *encoder)
2589{
Jani Nikula828f5c62013-09-05 16:44:45 +03002590 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2591
Jani Nikulaecff4f32013-09-06 07:38:29 +03002592 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002593 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002594}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002595
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002596static void vlv_enable_dp(struct intel_encoder *encoder)
2597{
Jani Nikula828f5c62013-09-05 16:44:45 +03002598 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2599
Daniel Vetter4be73782014-01-17 14:39:48 +01002600 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002601 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002602}
2603
Jani Nikulaecff4f32013-09-06 07:38:29 +03002604static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002605{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002606 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002607 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002608
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002609 intel_dp_prepare(encoder);
2610
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002611 /* Only ilk+ has port A */
2612 if (dport->port == PORT_A) {
2613 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002614 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002615 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002616}
2617
Ville Syrjälä83b84592014-10-16 21:29:51 +03002618static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2619{
2620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2621 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2622 enum pipe pipe = intel_dp->pps_pipe;
2623 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2624
2625 edp_panel_vdd_off_sync(intel_dp);
2626
2627 /*
2628	 * VLV seems to get confused when multiple power sequencers
2629	 * have the same port selected (even if only one has power/vdd
2630	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2631	 * CHV, on the other hand, doesn't seem to mind having the same port
2632	 * selected in multiple power sequencers, but let's always clear the
2633	 * port select when logically disconnecting a power sequencer
2634	 * from a port.
2635 */
2636 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2637 pipe_name(pipe), port_name(intel_dig_port->port));
2638 I915_WRITE(pp_on_reg, 0);
2639 POSTING_READ(pp_on_reg);
2640
2641 intel_dp->pps_pipe = INVALID_PIPE;
2642}
2643
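/*
 * Before a pipe's power sequencer can be handed to a new port, any other
 * eDP port still claiming that pipe must release it first. Walk the
 * encoder list, detach the sequencer from such ports, and warn if one of
 * them is still driving an active crtc.
 */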
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002644static void vlv_steal_power_sequencer(struct drm_device *dev,
2645 enum pipe pipe)
2646{
2647 struct drm_i915_private *dev_priv = dev->dev_private;
2648 struct intel_encoder *encoder;
2649
2650 lockdep_assert_held(&dev_priv->pps_mutex);
2651
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002652 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2653 return;
2654
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002655 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2656 base.head) {
2657 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002658 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002659
2660 if (encoder->type != INTEL_OUTPUT_EDP)
2661 continue;
2662
2663 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002664 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002665
2666 if (intel_dp->pps_pipe != pipe)
2667 continue;
2668
2669 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002670 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002671
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002672 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002673 "stealing pipe %c power sequencer from active eDP port %c\n",
2674 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002675
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002676 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002677 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002678 }
2679}
2680
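/*
 * Bind the power sequencer of this crtc's pipe to the port: release any
 * sequencer the port used before, steal the target pipe's sequencer from
 * whoever currently holds it, and reprogram the PPS registers for this
 * port.
 */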
2681static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2682{
2683 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2684 struct intel_encoder *encoder = &intel_dig_port->base;
2685 struct drm_device *dev = encoder->base.dev;
2686 struct drm_i915_private *dev_priv = dev->dev_private;
2687 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002688
2689 lockdep_assert_held(&dev_priv->pps_mutex);
2690
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002691 if (!is_edp(intel_dp))
2692 return;
2693
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002694 if (intel_dp->pps_pipe == crtc->pipe)
2695 return;
2696
2697 /*
2698 * If another power sequencer was being used on this
2699	 * port previously, make sure to turn off vdd there while
2700 * we still have control of it.
2701 */
2702 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002703 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002704
2705 /*
2706 * We may be stealing the power
2707 * sequencer from another port.
2708 */
2709 vlv_steal_power_sequencer(dev, crtc->pipe);
2710
2711 /* now it's all ours */
2712 intel_dp->pps_pipe = crtc->pipe;
2713
2714 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2715 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2716
2717 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002718 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2719 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002720}
2721
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002722static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2723{
2724 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2725 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002726 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002727 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002728 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002729 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002730 int pipe = intel_crtc->pipe;
2731 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002732
Ville Syrjäläa5805162015-05-26 20:42:30 +03002733 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002734
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002736 val = 0;
2737 if (pipe)
2738 val |= (1<<21);
2739 else
2740 val &= ~(1<<21);
2741 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002742 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2743 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002745
Ville Syrjäläa5805162015-05-26 20:42:30 +03002746 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002747
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002748 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002749}
2750
Jani Nikulaecff4f32013-09-06 07:38:29 +03002751static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002752{
2753 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2754 struct drm_device *dev = encoder->base.dev;
2755 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002756 struct intel_crtc *intel_crtc =
2757 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002758 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002759 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002760
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002761 intel_dp_prepare(encoder);
2762
Jesse Barnes89b667f2013-04-18 14:51:36 -07002763 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002764 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002765 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002766 DPIO_PCS_TX_LANE2_RESET |
2767 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002768 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002769 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2770 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2771 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2772 DPIO_PCS_CLK_SOFT_RESET);
2773
2774 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002775 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2776 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2777 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002778 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002779}
2780
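/*
 * CHV pre-enable, under the sideband lock: let the hardware manage the
 * TX FIFO reset source, deassert the per-lane soft resets, program the
 * UPAR bits and the data lane stagger (scaled by link rate), and then
 * enable the port itself.
 */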
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002781static void chv_pre_enable_dp(struct intel_encoder *encoder)
2782{
2783 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2784 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2785 struct drm_device *dev = encoder->base.dev;
2786 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002787 struct intel_crtc *intel_crtc =
2788 to_intel_crtc(encoder->base.crtc);
2789 enum dpio_channel ch = vlv_dport_to_channel(dport);
2790 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002791 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002792 u32 val;
2793
Ville Syrjäläa5805162015-05-26 20:42:30 +03002794 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002795
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002796 /* allow hardware to manage TX FIFO reset source */
2797 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2798 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2800
2801 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2802 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2804
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002805	/* Deassert soft data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002806 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002807 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002808 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002809
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002810 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2811 val |= CHV_PCS_REQ_SOFTRESET_EN;
2812 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2813
2814 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002815 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002816 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2817
2818 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2819 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2820 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002821
2822	/* Program optimal Tx lane latency setting */
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002823 for (i = 0; i < 4; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002824 /* Set the upar bit */
2825 data = (i == 1) ? 0x0 : 0x1;
2826 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2827 data << DPIO_UPAR_SHIFT);
2828 }
2829
2830 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002831 if (intel_crtc->config->port_clock > 270000)
2832 stagger = 0x18;
2833 else if (intel_crtc->config->port_clock > 135000)
2834 stagger = 0xd;
2835 else if (intel_crtc->config->port_clock > 67500)
2836 stagger = 0x7;
2837 else if (intel_crtc->config->port_clock > 33750)
2838 stagger = 0x4;
2839 else
2840 stagger = 0x2;
2841
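	/*
	 * e.g. HBR2 (port_clock 540000 kHz) ends up with stagger 0x18,
	 * while both HBR (270000) and RBR (162000) fall into the
	 * "> 135000" bucket and use 0xd.
	 */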
2842 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2843 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2844 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2845
2846 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2847 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2848 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2849
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2851 DPIO_LANESTAGGER_STRAP(stagger) |
2852 DPIO_LANESTAGGER_STRAP_OVRD |
2853 DPIO_TX1_STAGGER_MASK(0x1f) |
2854 DPIO_TX1_STAGGER_MULT(6) |
2855 DPIO_TX2_STAGGER_MULT(0));
2856
2857 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2858 DPIO_LANESTAGGER_STRAP(stagger) |
2859 DPIO_LANESTAGGER_STRAP_OVRD |
2860 DPIO_TX1_STAGGER_MASK(0x1f) |
2861 DPIO_TX1_STAGGER_MULT(7) |
2862 DPIO_TX2_STAGGER_MULT(5));
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002863
Ville Syrjäläa5805162015-05-26 20:42:30 +03002864 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002865
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002866 intel_enable_dp(encoder);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002867}
2868
Ville Syrjälä9197c882014-04-09 13:29:05 +03002869static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2870{
2871 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2872 struct drm_device *dev = encoder->base.dev;
2873 struct drm_i915_private *dev_priv = dev->dev_private;
2874 struct intel_crtc *intel_crtc =
2875 to_intel_crtc(encoder->base.crtc);
2876 enum dpio_channel ch = vlv_dport_to_channel(dport);
2877 enum pipe pipe = intel_crtc->pipe;
2878 u32 val;
2879
Ville Syrjälä625695f2014-06-28 02:04:02 +03002880 intel_dp_prepare(encoder);
2881
Ville Syrjäläa5805162015-05-26 20:42:30 +03002882 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002883
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002884 /* program left/right clock distribution */
2885 if (pipe != PIPE_B) {
2886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2888 if (ch == DPIO_CH0)
2889 val |= CHV_BUFLEFTENA1_FORCE;
2890 if (ch == DPIO_CH1)
2891 val |= CHV_BUFRIGHTENA1_FORCE;
2892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2893 } else {
2894 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2895 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2896 if (ch == DPIO_CH0)
2897 val |= CHV_BUFLEFTENA2_FORCE;
2898 if (ch == DPIO_CH1)
2899 val |= CHV_BUFRIGHTENA2_FORCE;
2900 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2901 }
2902
Ville Syrjälä9197c882014-04-09 13:29:05 +03002903 /* program clock channel usage */
2904 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2905 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2906 if (pipe != PIPE_B)
2907 val &= ~CHV_PCS_USEDCLKCHANNEL;
2908 else
2909 val |= CHV_PCS_USEDCLKCHANNEL;
2910 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2911
2912 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2913 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2914 if (pipe != PIPE_B)
2915 val &= ~CHV_PCS_USEDCLKCHANNEL;
2916 else
2917 val |= CHV_PCS_USEDCLKCHANNEL;
2918 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2919
2920 /*
2921	 * This is a bit weird since generally CL
2922 * matches the pipe, but here we need to
2923 * pick the CL based on the port.
2924 */
2925 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2926 if (pipe != PIPE_B)
2927 val &= ~CHV_CMN_USEDCLKCHANNEL;
2928 else
2929 val |= CHV_CMN_USEDCLKCHANNEL;
2930 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2931
Ville Syrjäläa5805162015-05-26 20:42:30 +03002932 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002933}
2934
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002935/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002936 * Native read with retry for link status and receiver capability reads for
2937 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02002938 *
2939 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2940 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002941 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02002942static ssize_t
2943intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2944 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002945{
Jani Nikula9d1a1032014-03-14 16:51:15 +02002946 ssize_t ret;
2947 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002948
Ville Syrjäläf6a19062014-10-16 20:46:09 +03002949 /*
2950	 * Sometimes we just get the same incorrect byte repeated
2951	 * over the entire buffer. Doing just one throw-away read
2952 * initially seems to "solve" it.
2953 */
2954 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2955
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002956 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002957 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2958 if (ret == size)
2959 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002960 msleep(1);
2961 }
2962
Jani Nikula9d1a1032014-03-14 16:51:15 +02002963 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002964}
2965
2966/*
2967 * Fetch AUX CH registers 0x202 - 0x207 which contain
2968 * link status information
2969 */
2970static bool
Keith Packard93f62da2011-11-01 19:45:03 -07002971intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002972{
Jani Nikula9d1a1032014-03-14 16:51:15 +02002973 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2974 DP_LANE0_1_STATUS,
2975 link_status,
2976 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002977}
2978
Paulo Zanoni11002442014-06-13 18:45:41 -03002979/* These are source-specific values. */
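/*
 * intel_dp_voltage_max() and intel_dp_pre_emphasis_max() report the
 * highest voltage swing and pre-emphasis levels this platform/port can
 * drive; intel_get_adjust_train() clamps the sink's requests against
 * these maxima.
 */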
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002980static uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08002981intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002982{
Paulo Zanoni30add222012-10-26 19:05:45 -02002983 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302984 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002985 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08002986
Vandana Kannan93147262014-11-18 15:45:29 +05302987 if (IS_BROXTON(dev))
2988 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2989 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05302990 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302991 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00002992 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05302993 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05302994 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002995 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05302996 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03002997 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05302998 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08002999 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303000 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003001}
3002
3003static uint8_t
3004intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3005{
Paulo Zanoni30add222012-10-26 19:05:45 -02003006 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003007 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003008
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003009 if (INTEL_INFO(dev)->gen >= 9) {
3010 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3012 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3014 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3015 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3016 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303017 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3018 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003019 default:
3020 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3021 }
3022 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003023 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3025 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3027 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3029 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003031 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303032 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003033 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003034 } else if (IS_VALLEYVIEW(dev)) {
3035 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303036 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3037 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3039 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3041 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003043 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303044 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003045 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003046 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003047 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3049 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3051 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3052 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003053 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303054 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003055 }
3056 } else {
3057 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303058 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3059 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3061 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3063 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003065 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303066 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003067 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003068 }
3069}
3070
Daniel Vetter5829975c2015-04-16 11:36:52 +02003071static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003072{
3073 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003076 struct intel_crtc *intel_crtc =
3077 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003078 unsigned long demph_reg_value, preemph_reg_value,
3079 uniqtranscale_reg_value;
3080 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003081 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003082 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003083
3084 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303085 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003086 preemph_reg_value = 0x0004000;
3087 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303088 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003089 demph_reg_value = 0x2B405555;
3090 uniqtranscale_reg_value = 0x552AB83A;
3091 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003093 demph_reg_value = 0x2B404040;
3094 uniqtranscale_reg_value = 0x5548B83A;
3095 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003097 demph_reg_value = 0x2B245555;
3098 uniqtranscale_reg_value = 0x5560B83A;
3099 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003101 demph_reg_value = 0x2B405555;
3102 uniqtranscale_reg_value = 0x5598DA3A;
3103 break;
3104 default:
3105 return 0;
3106 }
3107 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303108 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003109 preemph_reg_value = 0x0002000;
3110 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303111 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003112 demph_reg_value = 0x2B404040;
3113 uniqtranscale_reg_value = 0x5552B83A;
3114 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003116 demph_reg_value = 0x2B404848;
3117 uniqtranscale_reg_value = 0x5580B83A;
3118 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003120 demph_reg_value = 0x2B404040;
3121 uniqtranscale_reg_value = 0x55ADDA3A;
3122 break;
3123 default:
3124 return 0;
3125 }
3126 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303127 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003128 preemph_reg_value = 0x0000000;
3129 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003131 demph_reg_value = 0x2B305555;
3132 uniqtranscale_reg_value = 0x5570B83A;
3133 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003135 demph_reg_value = 0x2B2B4040;
3136 uniqtranscale_reg_value = 0x55ADDA3A;
3137 break;
3138 default:
3139 return 0;
3140 }
3141 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303142 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003143 preemph_reg_value = 0x0006000;
3144 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003146 demph_reg_value = 0x1B405555;
3147 uniqtranscale_reg_value = 0x55ADDA3A;
3148 break;
3149 default:
3150 return 0;
3151 }
3152 break;
3153 default:
3154 return 0;
3155 }
3156
Ville Syrjäläa5805162015-05-26 20:42:30 +03003157 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003158 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3159 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3160 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003161 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003162 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3165 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003166 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003167
3168 return 0;
3169}
3170
Daniel Vetter5829975c2015-04-16 11:36:52 +02003171static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003172{
3173 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3174 struct drm_i915_private *dev_priv = dev->dev_private;
3175 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3176 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003177 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003178 uint8_t train_set = intel_dp->train_set[0];
3179 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003180 enum pipe pipe = intel_crtc->pipe;
3181 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003182
3183 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303184 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003185 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003187 deemph_reg_value = 128;
3188 margin_reg_value = 52;
3189 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003191 deemph_reg_value = 128;
3192 margin_reg_value = 77;
3193 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003195 deemph_reg_value = 128;
3196 margin_reg_value = 102;
3197 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003199 deemph_reg_value = 128;
3200 margin_reg_value = 154;
3201 /* FIXME extra to set for 1200 */
3202 break;
3203 default:
3204 return 0;
3205 }
3206 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303207 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003210 deemph_reg_value = 85;
3211 margin_reg_value = 78;
3212 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003214 deemph_reg_value = 85;
3215 margin_reg_value = 116;
3216 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303217 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003218 deemph_reg_value = 85;
3219 margin_reg_value = 154;
3220 break;
3221 default:
3222 return 0;
3223 }
3224 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303225 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003226 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003228 deemph_reg_value = 64;
3229 margin_reg_value = 104;
3230 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303231 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003232 deemph_reg_value = 64;
3233 margin_reg_value = 154;
3234 break;
3235 default:
3236 return 0;
3237 }
3238 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303239 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003240 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003242 deemph_reg_value = 43;
3243 margin_reg_value = 154;
3244 break;
3245 default:
3246 return 0;
3247 }
3248 break;
3249 default:
3250 return 0;
3251 }
3252
Ville Syrjäläa5805162015-05-26 20:42:30 +03003253 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003254
3255 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003256 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3257 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003258 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3259 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003260 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3261
3262 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3263 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003264 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3265 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003266 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003267
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003268 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3269 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3270 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3272
3273 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3274 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3275 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3276 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3277
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003278 /* Program swing deemph */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003279 for (i = 0; i < 4; i++) {
3280 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3281 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3282 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3283 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3284 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003285
3286 /* Program swing margin */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003287 for (i = 0; i < 4; i++) {
3288 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003289 val &= ~DPIO_SWING_MARGIN000_MASK;
3290 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003291 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3292 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003293
3294 /* Disable unique transition scale */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003295 for (i = 0; i < 4; i++) {
3296 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3297 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3298 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3299 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003300
3301 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
Sonika Jindalbd600182014-08-08 16:23:41 +05303302 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003303 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
Sonika Jindalbd600182014-08-08 16:23:41 +05303304 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003305
3306 /*
3307 * The document said it needs to set bit 27 for ch0 and bit 26
3308 * for ch1. Might be a typo in the doc.
3309 * For now, for this unique transition scale selection, set bit
3310 * 27 for ch0 and ch1.
3311 */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003312 for (i = 0; i < 4; i++) {
3313 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3314 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3315 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3316 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003317
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003318 for (i = 0; i < 4; i++) {
3319 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3320 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3321 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3322 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3323 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003324 }
3325
3326 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003327 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3328 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3329 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3330
3331 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3332 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3333 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003334
3335 /* LRC Bypass */
3336 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3337 val |= DPIO_LRC_BYPASS;
3338 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3339
Ville Syrjäläa5805162015-05-26 20:42:30 +03003340 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003341
3342 return 0;
3343}
3344
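/*
 * Pick the highest voltage swing and pre-emphasis requested by the sink
 * across all lanes (from the DPCD adjust request fields), clamp them to
 * what the source supports, and set the MAX_*_REACHED flags when the cap
 * is hit so the sink stops asking for more. For example, a request for
 * swing level 3 on a platform whose maximum is level 2 is programmed as
 * level 2 with DP_TRAIN_MAX_SWING_REACHED set.
 */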
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003345static void
Jani Nikula0301b3a2013-10-15 09:36:08 +03003346intel_get_adjust_train(struct intel_dp *intel_dp,
3347 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003348{
3349 uint8_t v = 0;
3350 uint8_t p = 0;
3351 int lane;
Keith Packard1a2eb462011-11-16 16:26:07 -08003352 uint8_t voltage_max;
3353 uint8_t preemph_max;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003354
Jesse Barnes33a34e42010-09-08 12:42:02 -07003355 for (lane = 0; lane < intel_dp->lane_count; lane++) {
Daniel Vetter0f037bd2012-10-18 10:15:27 +02003356 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3357 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003358
3359 if (this_v > v)
3360 v = this_v;
3361 if (this_p > p)
3362 p = this_p;
3363 }
3364
Keith Packard1a2eb462011-11-16 16:26:07 -08003365 voltage_max = intel_dp_voltage_max(intel_dp);
Keith Packard417e8222011-11-01 19:54:11 -07003366 if (v >= voltage_max)
3367 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003368
Keith Packard1a2eb462011-11-16 16:26:07 -08003369 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3370 if (p >= preemph_max)
3371 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003372
3373 for (lane = 0; lane < 4; lane++)
Jesse Barnes33a34e42010-09-08 12:42:02 -07003374 intel_dp->train_set[lane] = v | p;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003375}
3376
3377static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003378gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003379{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003380 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003381
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003382 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003384 default:
3385 signal_levels |= DP_VOLTAGE_0_4;
3386 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003388 signal_levels |= DP_VOLTAGE_0_6;
3389 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003391 signal_levels |= DP_VOLTAGE_0_8;
3392 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303393 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003394 signal_levels |= DP_VOLTAGE_1_2;
3395 break;
3396 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003397 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303398 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003399 default:
3400 signal_levels |= DP_PRE_EMPHASIS_0;
3401 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303402 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003403 signal_levels |= DP_PRE_EMPHASIS_3_5;
3404 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303405 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003406 signal_levels |= DP_PRE_EMPHASIS_6;
3407 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303408 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003409 signal_levels |= DP_PRE_EMPHASIS_9_5;
3410 break;
3411 }
3412 return signal_levels;
3413}
3414
Zhenyu Wange3421a12010-04-08 09:43:27 +08003415/* Gen6's DP voltage swing and pre-emphasis control */
3416static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003417gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003418{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003419 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3420 DP_TRAIN_PRE_EMPHASIS_MASK);
3421 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003424 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003426 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003429 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003432 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003435 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003436 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003437 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3438 "0x%x\n", signal_levels);
3439 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003440 }
3441}
3442
Keith Packard1a2eb462011-11-16 16:26:07 -08003443/* Gen7's DP voltage swing and pre-emphasis control */
3444static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003445gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003446{
3447 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3448 DP_TRAIN_PRE_EMPHASIS_MASK);
3449 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003451 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303452 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003453 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003455 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3456
Sonika Jindalbd600182014-08-08 16:23:41 +05303457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003458 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003460 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3461
Sonika Jindalbd600182014-08-08 16:23:41 +05303462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003463 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303464 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003465 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3466
3467 default:
3468 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3469 "0x%x\n", signal_levels);
3470 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3471 }
3472}
3473
Paulo Zanonif0a34242012-12-06 16:51:50 -02003474/* Properly updates "DP" with the correct signal levels. */
3475static void
3476intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3477{
3478 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003479 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003480 struct drm_device *dev = intel_dig_port->base.base.dev;
David Weinehallf8896f52015-06-25 11:11:03 +03003481 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003482 uint8_t train_set = intel_dp->train_set[0];
3483
David Weinehallf8896f52015-06-25 11:11:03 +03003484 if (HAS_DDI(dev)) {
3485 signal_levels = ddi_signal_levels(intel_dp);
3486
3487 if (IS_BROXTON(dev))
3488 signal_levels = 0;
3489 else
3490 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003491 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003492 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003493 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003494 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003495 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003496 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003497 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003498 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003499 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003500 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3501 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003502 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003503 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3504 }
3505
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303506 if (mask)
3507 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3508
3509 DRM_DEBUG_KMS("Using vswing level %d\n",
3510 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3511 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3512 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3513 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003514
3515 *DP = (*DP & ~mask) | signal_levels;
3516}
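/*
 * Usage sketch (illustrative only): callers keep the port register value
 * in a local, let intel_dp_set_signal_levels() merge in the new
 * vswing/pre-emphasis bits, and then write the register themselves, e.g.
 *
 *	uint32_t DP = intel_dp->DP;
 *
 *	intel_dp_set_signal_levels(intel_dp, &DP);
 *	I915_WRITE(intel_dp->output_reg, DP);
 *	POSTING_READ(intel_dp->output_reg);
 */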
3517
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003518static bool
Chris Wilsonea5b2132010-08-04 13:50:23 +01003519intel_dp_set_link_train(struct intel_dp *intel_dp,
Jani Nikula70aff662013-09-27 15:10:44 +03003520 uint32_t *DP,
Chris Wilson58e10eb2010-10-03 10:56:11 +01003521 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003522{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003523 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3524 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003525 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003526 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3527 int ret, len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003528
Ville Syrjälä7b13b582014-08-18 22:16:08 +03003529 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003530
Jani Nikula70aff662013-09-27 15:10:44 +03003531 I915_WRITE(intel_dp->output_reg, *DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003532 POSTING_READ(intel_dp->output_reg);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003533
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003534 buf[0] = dp_train_pat;
3535 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003536 DP_TRAINING_PATTERN_DISABLE) {
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003537 /* don't write DP_TRAINING_LANEx_SET on disable */
3538 len = 1;
3539 } else {
3540 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3541 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3542 len = intel_dp->lane_count + 1;
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003543 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003544
Jani Nikula9d1a1032014-03-14 16:51:15 +02003545 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3546 buf, len);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003547
3548 return ret == len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003549}
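/*
 * Sketch of the DPCD burst issued above, assuming a 4-lane link (the lane
 * count is picked only for the example):
 *
 *	buf[0]    = dp_train_pat;              -> DP_TRAINING_PATTERN_SET
 *	buf[1..4] = intel_dp->train_set[0..3]  -> DP_TRAINING_LANE0_SET..LANE3_SET
 *
 * For DP_TRAINING_PATTERN_DISABLE only buf[0] is written, since per-lane
 * drive settings are meaningless once training is switched off.
 */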
3550
Jani Nikula70aff662013-09-27 15:10:44 +03003551static bool
3552intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3553 uint8_t dp_train_pat)
3554{
Mika Kahola4e96c972015-04-29 09:17:39 +03003555 if (!intel_dp->train_set_valid)
3556 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
Jani Nikula70aff662013-09-27 15:10:44 +03003557 intel_dp_set_signal_levels(intel_dp, DP);
3558 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3559}
3560
3561static bool
3562intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
Jani Nikula0301b3a2013-10-15 09:36:08 +03003563 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Jani Nikula70aff662013-09-27 15:10:44 +03003564{
3565 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3566 struct drm_device *dev = intel_dig_port->base.base.dev;
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568 int ret;
3569
3570 intel_get_adjust_train(intel_dp, link_status);
3571 intel_dp_set_signal_levels(intel_dp, DP);
3572
3573 I915_WRITE(intel_dp->output_reg, *DP);
3574 POSTING_READ(intel_dp->output_reg);
3575
Jani Nikula9d1a1032014-03-14 16:51:15 +02003576 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3577 intel_dp->train_set, intel_dp->lane_count);
Jani Nikula70aff662013-09-27 15:10:44 +03003578
3579 return ret == intel_dp->lane_count;
3580}
3581
Imre Deak3ab9c632013-05-03 12:57:41 +03003582static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3583{
3584 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3585 struct drm_device *dev = intel_dig_port->base.base.dev;
3586 struct drm_i915_private *dev_priv = dev->dev_private;
3587 enum port port = intel_dig_port->port;
3588 uint32_t val;
3589
3590 if (!HAS_DDI(dev))
3591 return;
3592
3593 val = I915_READ(DP_TP_CTL(port));
3594 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3595 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3596 I915_WRITE(DP_TP_CTL(port), val);
3597
3598 /*
 3599 * On PORT_A we can have only eDP in SST mode. There, the only reason
3600 * we need to set idle transmission mode is to work around a HW issue
3601 * where we enable the pipe while not in idle link-training mode.
 3602 * In this case there is a requirement to wait for a minimum number of
3603 * idle patterns to be sent.
3604 */
3605 if (port == PORT_A)
3606 return;
3607
3608 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3609 1))
3610 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3611}
3612
Jesse Barnes33a34e42010-09-08 12:42:02 -07003613/* Enable corresponding port and start training pattern 1 */
Paulo Zanonic19b0662012-10-15 15:51:41 -03003614void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003615intel_dp_start_link_train(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003616{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003617 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
Paulo Zanonic19b0662012-10-15 15:51:41 -03003618 struct drm_device *dev = encoder->dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003619 int i;
3620 uint8_t voltage;
Keith Packardcdb0e952011-11-01 20:00:06 -07003621 int voltage_tries, loop_tries;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003622 uint32_t DP = intel_dp->DP;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003623 uint8_t link_config[2];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003624
Paulo Zanoniaffa9352012-11-23 15:30:39 -02003625 if (HAS_DDI(dev))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003626 intel_ddi_prepare_link_retrain(encoder);
3627
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003628 /* Write the link configuration data */
Jani Nikula6aba5b62013-10-04 15:08:10 +03003629 link_config[0] = intel_dp->link_bw;
3630 link_config[1] = intel_dp->lane_count;
3631 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3632 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003633 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003634 if (intel_dp->num_sink_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05303635 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3636 &intel_dp->rate_select, 1);
Jani Nikula6aba5b62013-10-04 15:08:10 +03003637
3638 link_config[0] = 0;
3639 link_config[1] = DP_SET_ANSI_8B10B;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003640 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003641
3642 DP |= DP_PORT_EN;
Keith Packard1a2eb462011-11-16 16:26:07 -08003643
Jani Nikula70aff662013-09-27 15:10:44 +03003644 /* clock recovery */
3645 if (!intel_dp_reset_link_train(intel_dp, &DP,
3646 DP_TRAINING_PATTERN_1 |
3647 DP_LINK_SCRAMBLING_DISABLE)) {
3648 DRM_ERROR("failed to enable link training\n");
3649 return;
3650 }
3651
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003652 voltage = 0xff;
Keith Packardcdb0e952011-11-01 20:00:06 -07003653 voltage_tries = 0;
3654 loop_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003655 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003656 uint8_t link_status[DP_LINK_STATUS_SIZE];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003657
Daniel Vettera7c96552012-10-18 10:15:30 +02003658 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
Keith Packard93f62da2011-11-01 19:45:03 -07003659 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3660 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003661 break;
Keith Packard93f62da2011-11-01 19:45:03 -07003662 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003663
Daniel Vetter01916272012-10-18 10:15:25 +02003664 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Keith Packard93f62da2011-11-01 19:45:03 -07003665 DRM_DEBUG_KMS("clock recovery OK\n");
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003666 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003667 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003668
Mika Kahola4e96c972015-04-29 09:17:39 +03003669 /*
3670 * if we used previously trained voltage and pre-emphasis values
3671 * and we don't get clock recovery, reset link training values
3672 */
3673 if (intel_dp->train_set_valid) {
3674 DRM_DEBUG_KMS("clock recovery not ok, reset");
3675 /* clear the flag as we are not reusing train set */
3676 intel_dp->train_set_valid = false;
3677 if (!intel_dp_reset_link_train(intel_dp, &DP,
3678 DP_TRAINING_PATTERN_1 |
3679 DP_LINK_SCRAMBLING_DISABLE)) {
3680 DRM_ERROR("failed to enable link training\n");
3681 return;
3682 }
3683 continue;
3684 }
3685
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003686 /* Check to see if we've tried the max voltage */
3687 for (i = 0; i < intel_dp->lane_count; i++)
3688 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3689 break;
Takashi Iwai3b4f8192013-03-11 18:40:16 +01003690 if (i == intel_dp->lane_count) {
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003691 ++loop_tries;
3692 if (loop_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003693 DRM_ERROR("too many full retries, give up\n");
Keith Packardcdb0e952011-11-01 20:00:06 -07003694 break;
3695 }
Jani Nikula70aff662013-09-27 15:10:44 +03003696 intel_dp_reset_link_train(intel_dp, &DP,
3697 DP_TRAINING_PATTERN_1 |
3698 DP_LINK_SCRAMBLING_DISABLE);
Keith Packardcdb0e952011-11-01 20:00:06 -07003699 voltage_tries = 0;
3700 continue;
3701 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003702
3703 /* Check to see if we've tried the same voltage 5 times */
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003704 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
Chris Wilson24773672012-09-26 16:48:30 +01003705 ++voltage_tries;
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003706 if (voltage_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003707 DRM_ERROR("too many voltage retries, give up\n");
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003708 break;
3709 }
3710 } else
3711 voltage_tries = 0;
3712 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003713
Jani Nikula70aff662013-09-27 15:10:44 +03003714 /* Update training set as requested by target */
3715 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3716 DRM_ERROR("failed to update link training\n");
3717 break;
3718 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003719 }
3720
Jesse Barnes33a34e42010-09-08 12:42:02 -07003721 intel_dp->DP = DP;
3722}
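/*
 * Summary sketch of the clock recovery loop above (pseudo-code, not
 * executable driver code):
 *
 *	for (;;) {
 *		read link_status;
 *		if (clock recovery OK on all lanes)
 *			break;
 *		if (a cached train_set was being reused)
 *			drop it and restart TPS1 from scratch;
 *		else if (every lane reports DP_TRAIN_MAX_SWING_REACHED)
 *			full reset, giving up after 5 loop_tries;
 *		else if (the same voltage was requested 5 times)
 *			give up;
 *		else
 *			apply the sink-requested vswing/pre-emphasis and retry;
 *	}
 */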
3723
Paulo Zanonic19b0662012-10-15 15:51:41 -03003724void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003725intel_dp_complete_link_train(struct intel_dp *intel_dp)
3726{
Jesse Barnes33a34e42010-09-08 12:42:02 -07003727 bool channel_eq = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003728 int tries, cr_tries;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003729 uint32_t DP = intel_dp->DP;
Todd Previte06ea66b2014-01-20 10:19:39 -07003730 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3731
3732 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3733 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3734 training_pattern = DP_TRAINING_PATTERN_3;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003735
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003736 /* channel equalization */
Jani Nikula70aff662013-09-27 15:10:44 +03003737 if (!intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003738 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003739 DP_LINK_SCRAMBLING_DISABLE)) {
3740 DRM_ERROR("failed to start channel equalization\n");
3741 return;
3742 }
3743
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003744 tries = 0;
Jesse Barnes37f80972011-01-05 14:45:24 -08003745 cr_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003746 channel_eq = false;
3747 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003748 uint8_t link_status[DP_LINK_STATUS_SIZE];
Zhenyu Wange3421a12010-04-08 09:43:27 +08003749
Jesse Barnes37f80972011-01-05 14:45:24 -08003750 if (cr_tries > 5) {
3751 DRM_ERROR("failed to train DP, aborting\n");
Jesse Barnes37f80972011-01-05 14:45:24 -08003752 break;
3753 }
3754
Daniel Vettera7c96552012-10-18 10:15:30 +02003755 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
Jani Nikula70aff662013-09-27 15:10:44 +03003756 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3757 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003758 break;
Jani Nikula70aff662013-09-27 15:10:44 +03003759 }
Jesse Barnes869184a2010-10-07 16:01:22 -07003760
Jesse Barnes37f80972011-01-05 14:45:24 -08003761 /* Make sure clock is still ok */
Daniel Vetter01916272012-10-18 10:15:25 +02003762 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003763 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003764 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003765 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003766 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003767 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003768 cr_tries++;
3769 continue;
3770 }
3771
Daniel Vetter1ffdff12012-10-18 10:15:24 +02003772 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003773 channel_eq = true;
3774 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003775 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003776
Jesse Barnes37f80972011-01-05 14:45:24 -08003777 /* Try 5 times, then try clock recovery if that fails */
3778 if (tries > 5) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003779 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003780 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003781 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003782 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003783 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003784 tries = 0;
3785 cr_tries++;
3786 continue;
3787 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003788
Jani Nikula70aff662013-09-27 15:10:44 +03003789 /* Update training set as requested by target */
3790 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3791 DRM_ERROR("failed to update link training\n");
3792 break;
3793 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003794 ++tries;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003795 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003796
Imre Deak3ab9c632013-05-03 12:57:41 +03003797 intel_dp_set_idle_link_train(intel_dp);
3798
3799 intel_dp->DP = DP;
3800
Mika Kahola4e96c972015-04-29 09:17:39 +03003801 if (channel_eq) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03003802 intel_dp->train_set_valid = true;
Masanari Iida07f42252013-03-20 11:00:34 +09003803 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
Mika Kahola4e96c972015-04-29 09:17:39 +03003804 }
Imre Deak3ab9c632013-05-03 12:57:41 +03003805}
3806
3807void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3808{
Jani Nikula70aff662013-09-27 15:10:44 +03003809 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
Imre Deak3ab9c632013-05-03 12:57:41 +03003810 DP_TRAINING_PATTERN_DISABLE);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003811}
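/*
 * Typical call sequence, as used by the retrain paths later in this file
 * (intel_dp_check_link_status() and the MST ESI handler):
 *
 *	intel_dp_start_link_train(intel_dp);     -- clock recovery, TPS1
 *	intel_dp_complete_link_train(intel_dp);  -- channel EQ, TPS2/TPS3
 *	intel_dp_stop_link_train(intel_dp);      -- training pattern disable
 */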
3812
3813static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003814intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003815{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003816 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003817 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003818 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003819 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003820 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003821 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003822
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003823 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003824 return;
3825
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003826 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003827 return;
3828
Zhao Yakui28c97732009-10-09 11:39:41 +08003829 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003830
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003831 if ((IS_GEN7(dev) && port == PORT_A) ||
3832 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003833 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003834 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003835 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003836 if (IS_CHERRYVIEW(dev))
3837 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3838 else
3839 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003840 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003841 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003842 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003843 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003844
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003845 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3846 I915_WRITE(intel_dp->output_reg, DP);
3847 POSTING_READ(intel_dp->output_reg);
3848
3849 /*
3850 * HW workaround for IBX, we need to move the port
3851 * to transcoder A after disabling it to allow the
3852 * matching HDMI port to be enabled on transcoder A.
3853 */
3854 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3855 /* always enable with pattern 1 (as per spec) */
3856 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3857 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3858 I915_WRITE(intel_dp->output_reg, DP);
3859 POSTING_READ(intel_dp->output_reg);
3860
3861 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003862 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003863 POSTING_READ(intel_dp->output_reg);
Eric Anholt5bddd172010-11-18 09:32:59 +08003864 }
3865
Keith Packardf01eca22011-09-28 16:48:10 -07003866 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003867}
3868
Keith Packard26d61aa2011-07-25 20:01:09 -07003869static bool
3870intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003871{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003872 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3873 struct drm_device *dev = dig_port->base.base.dev;
3874 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303875 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003876
Jani Nikula9d1a1032014-03-14 16:51:15 +02003877 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3878 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003879 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003880
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003881 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003882
Adam Jacksonedb39242012-09-18 10:58:49 -04003883 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3884 return false; /* DPCD not present */
3885
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003886 /* Check if the panel supports PSR */
3887 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003888 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003889 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3890 intel_dp->psr_dpcd,
3891 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003892 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3893 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003894 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003895 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303896
3897 if (INTEL_INFO(dev)->gen >= 9 &&
3898 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3899 uint8_t frame_sync_cap;
3900
3901 dev_priv->psr.sink_support = true;
3902 intel_dp_dpcd_read_wake(&intel_dp->aux,
3903 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3904 &frame_sync_cap, 1);
3905 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3906 /* PSR2 needs frame sync as well */
3907 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3908 DRM_DEBUG_KMS("PSR2 %s on sink",
3909 dev_priv->psr.psr2_support ? "supported" : "not supported");
3910 }
Jani Nikula50003932013-09-20 16:42:17 +03003911 }
3912
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05303913 /* Training Pattern 3 support: only the Intel platforms that support
 3914 * HBR2 also have support for TP3, hence the source check is used along
 3915 * with the DPCD check to ensure TP3 can be enabled.
 3916 * SKL < B0 is the only exception: due to its WaDisableHBR2 workaround,
 3917 * TP3 is supported but still not enabled.
 3918 */
Todd Previte06ea66b2014-01-20 10:19:39 -07003919 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
Jani Nikula7809a612014-10-29 11:03:26 +02003920 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05303921 intel_dp_source_supports_hbr2(dev)) {
Todd Previte06ea66b2014-01-20 10:19:39 -07003922 intel_dp->use_tps3 = true;
Jani Nikulaf8d8a672014-09-05 16:19:18 +03003923 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
Todd Previte06ea66b2014-01-20 10:19:39 -07003924 } else
3925 intel_dp->use_tps3 = false;
3926
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303927 /* Intermediate frequency support */
3928 if (is_edp(intel_dp) &&
3929 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3930 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3931 (rev >= 0x03)) { /* eDp v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003932 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003933 int i;
3934
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303935 intel_dp_dpcd_read_wake(&intel_dp->aux,
3936 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003937 sink_rates,
3938 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003939
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003940 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3941 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003942
3943 if (val == 0)
3944 break;
3945
Sonika Jindalaf77b972015-05-07 13:59:28 +05303946 /* Value read is in units of 200 kHz, while the drm clock is saved in deca-kHz */
3947 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003948 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003949 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303950 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003951
3952 intel_dp_print_rates(intel_dp);
3953
Adam Jacksonedb39242012-09-18 10:58:49 -04003954 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3955 DP_DWN_STRM_PORT_PRESENT))
3956 return true; /* native DP sink */
3957
3958 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3959 return true; /* no per-port downstream info */
3960
Jani Nikula9d1a1032014-03-14 16:51:15 +02003961 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3962 intel_dp->downstream_ports,
3963 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003964 return false; /* downstream port status fetch failed */
3965
3966 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003967}
3968
Adam Jackson0d198322012-05-14 16:05:47 -04003969static void
3970intel_dp_probe_oui(struct intel_dp *intel_dp)
3971{
3972 u8 buf[3];
3973
3974 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3975 return;
3976
Jani Nikula9d1a1032014-03-14 16:51:15 +02003977 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003978 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3979 buf[0], buf[1], buf[2]);
3980
Jani Nikula9d1a1032014-03-14 16:51:15 +02003981 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003982 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3983 buf[0], buf[1], buf[2]);
3984}
3985
Dave Airlie0e32b392014-05-02 14:02:48 +10003986static bool
3987intel_dp_probe_mst(struct intel_dp *intel_dp)
3988{
3989 u8 buf[1];
3990
3991 if (!intel_dp->can_mst)
3992 return false;
3993
3994 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3995 return false;
3996
Dave Airlie0e32b392014-05-02 14:02:48 +10003997 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3998 if (buf[0] & DP_MST_CAP) {
3999 DRM_DEBUG_KMS("Sink is MST capable\n");
4000 intel_dp->is_mst = true;
4001 } else {
4002 DRM_DEBUG_KMS("Sink is not MST capable\n");
4003 intel_dp->is_mst = false;
4004 }
4005 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004006
4007 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4008 return intel_dp->is_mst;
4009}
4010
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004011static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004012{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004013 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4014 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004015 u8 buf;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004016
4017 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004018 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4019 return;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004020 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004021
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004022 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004023 buf & ~DP_TEST_SINK_START) < 0)
4024 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4025
4026 hsw_enable_ips(intel_crtc);
4027}
4028
4029static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4030{
4031 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4032 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4033 u8 buf;
4034
4035 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4036 return -EIO;
4037
4038 if (!(buf & DP_TEST_CRC_SUPPORTED))
4039 return -ENOTTY;
4040
4041 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4042 return -EIO;
4043
4044 hsw_disable_ips(intel_crtc);
4045
4046 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4047 buf | DP_TEST_SINK_START) < 0) {
4048 hsw_enable_ips(intel_crtc);
4049 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004050 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004051
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004052 return 0;
4053}
4054
4055int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4056{
4057 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4058 struct drm_device *dev = dig_port->base.base.dev;
4059 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4060 u8 buf;
4061 int test_crc_count;
4062 int attempts = 6;
4063 int ret;
4064
4065 ret = intel_dp_sink_crc_start(intel_dp);
4066 if (ret)
4067 return ret;
4068
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004069 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4070 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004071 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004072 }
4073
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004074 test_crc_count = buf & DP_TEST_COUNT_MASK;
4075
4076 do {
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004077 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004078 DP_TEST_SINK_MISC, &buf) < 0) {
4079 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004080 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004081 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004082 intel_wait_for_vblank(dev, intel_crtc->pipe);
4083 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4084
4085 if (attempts == 0) {
Daniel Vetter90bd1f42014-11-19 11:18:47 +01004086 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004087 ret = -ETIMEDOUT;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004088 goto stop;
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004089 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004090
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004091 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004092 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004093stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004094 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004095 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004096}
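/*
 * Usage sketch (illustrative only): a caller wanting the sink-side CRC of
 * the currently scanned-out frame could do
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %6ph\n", crc);
 *
 * with a negative return meaning the sink lacks DP_TEST_CRC_SUPPORTED,
 * the DPCD access failed, or the sink never bumped DP_TEST_COUNT in time.
 */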
4097
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004098static bool
4099intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4100{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004101 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4102 DP_DEVICE_SERVICE_IRQ_VECTOR,
4103 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004104}
4105
Dave Airlie0e32b392014-05-02 14:02:48 +10004106static bool
4107intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108{
4109 int ret;
4110
4111 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4112 DP_SINK_COUNT_ESI,
4113 sink_irq_vector, 14);
4114 if (ret != 14)
4115 return false;
4116
4117 return true;
4118}
4119
Todd Previtec5d5ab72015-04-15 08:38:38 -07004120static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004121{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004122 uint8_t test_result = DP_TEST_ACK;
4123 return test_result;
4124}
4125
4126static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4127{
4128 uint8_t test_result = DP_TEST_NAK;
4129 return test_result;
4130}
4131
4132static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4133{
4134 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004135 struct intel_connector *intel_connector = intel_dp->attached_connector;
4136 struct drm_connector *connector = &intel_connector->base;
4137
4138 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004139 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004140 intel_dp->aux.i2c_defer_count > 6) {
4141 /* Check EDID read for NACKs, DEFERs and corruption
4142 * (DP CTS 1.2 Core r1.1)
4143 * 4.2.2.4 : Failed EDID read, I2C_NAK
4144 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4145 * 4.2.2.6 : EDID corruption detected
4146 * Use failsafe mode for all cases
4147 */
4148 if (intel_dp->aux.i2c_nack_count > 0 ||
4149 intel_dp->aux.i2c_defer_count > 0)
4150 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4151 intel_dp->aux.i2c_nack_count,
4152 intel_dp->aux.i2c_defer_count);
4153 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4154 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304155 struct edid *block = intel_connector->detect_edid;
4156
4157 /* We have to write the checksum
4158 * of the last block read
4159 */
4160 block += intel_connector->detect_edid->extensions;
4161
Todd Previte559be302015-05-04 07:48:20 -07004162 if (!drm_dp_dpcd_write(&intel_dp->aux,
4163 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304164 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004165 1))
Todd Previte559be302015-05-04 07:48:20 -07004166 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4167
4168 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4169 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4170 }
4171
4172 /* Set test active flag here so userspace doesn't interrupt things */
4173 intel_dp->compliance_test_active = 1;
4174
Todd Previtec5d5ab72015-04-15 08:38:38 -07004175 return test_result;
4176}
4177
4178static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4179{
4180 uint8_t test_result = DP_TEST_NAK;
4181 return test_result;
4182}
4183
4184static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4185{
4186 uint8_t response = DP_TEST_NAK;
4187 uint8_t rxdata = 0;
4188 int status = 0;
4189
Todd Previte559be302015-05-04 07:48:20 -07004190 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004191 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004192 intel_dp->compliance_test_data = 0;
4193
Todd Previtec5d5ab72015-04-15 08:38:38 -07004194 intel_dp->aux.i2c_nack_count = 0;
4195 intel_dp->aux.i2c_defer_count = 0;
4196
4197 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4198 if (status <= 0) {
4199 DRM_DEBUG_KMS("Could not read test request from sink\n");
4200 goto update_status;
4201 }
4202
4203 switch (rxdata) {
4204 case DP_TEST_LINK_TRAINING:
4205 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4206 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4207 response = intel_dp_autotest_link_training(intel_dp);
4208 break;
4209 case DP_TEST_LINK_VIDEO_PATTERN:
4210 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4211 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4212 response = intel_dp_autotest_video_pattern(intel_dp);
4213 break;
4214 case DP_TEST_LINK_EDID_READ:
4215 DRM_DEBUG_KMS("EDID test requested\n");
4216 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4217 response = intel_dp_autotest_edid(intel_dp);
4218 break;
4219 case DP_TEST_LINK_PHY_TEST_PATTERN:
4220 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4221 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4222 response = intel_dp_autotest_phy_pattern(intel_dp);
4223 break;
4224 default:
4225 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4226 break;
4227 }
4228
4229update_status:
4230 status = drm_dp_dpcd_write(&intel_dp->aux,
4231 DP_TEST_RESPONSE,
4232 &response, 1);
4233 if (status <= 0)
4234 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004235}
4236
Dave Airlie0e32b392014-05-02 14:02:48 +10004237static int
4238intel_dp_check_mst_status(struct intel_dp *intel_dp)
4239{
4240 bool bret;
4241
4242 if (intel_dp->is_mst) {
4243 u8 esi[16] = { 0 };
4244 int ret = 0;
4245 int retry;
4246 bool handled;
4247 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4248go_again:
4249 if (bret == true) {
4250
4251 /* check link status - esi[10] = 0x200c */
4252 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4253 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4254 intel_dp_start_link_train(intel_dp);
4255 intel_dp_complete_link_train(intel_dp);
4256 intel_dp_stop_link_train(intel_dp);
4257 }
4258
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004259 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004260 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4261
4262 if (handled) {
4263 for (retry = 0; retry < 3; retry++) {
4264 int wret;
4265 wret = drm_dp_dpcd_write(&intel_dp->aux,
4266 DP_SINK_COUNT_ESI+1,
4267 &esi[1], 3);
4268 if (wret == 3) {
4269 break;
4270 }
4271 }
4272
4273 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4274 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004275 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004276 goto go_again;
4277 }
4278 } else
4279 ret = 0;
4280
4281 return ret;
4282 } else {
4283 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4284 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4285 intel_dp->is_mst = false;
4286 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4287 /* send a hotplug event */
4288 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4289 }
4290 }
4291 return -EINVAL;
4292}
4293
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004294/*
4295 * According to DP spec
4296 * 5.1.2:
4297 * 1. Read DPCD
4298 * 2. Configure link according to Receiver Capabilities
4299 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4300 * 4. Check link status on receipt of hot-plug interrupt
4301 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004302static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004303intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004304{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004305 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004306 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004307 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004308 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004309
Dave Airlie5b215bc2014-08-05 10:40:20 +10004310 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4311
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004312 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004313 return;
4314
Imre Deak1a125d82014-08-18 14:42:46 +03004315 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4316 return;
4317
Keith Packard92fd8fd2011-07-25 19:50:10 -07004318 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004319 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004320 return;
4321 }
4322
Keith Packard92fd8fd2011-07-25 19:50:10 -07004323 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004324 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004325 return;
4326 }
4327
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004328 /* Try to read the source of the interrupt */
4329 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4330 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4331 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004332 drm_dp_dpcd_writeb(&intel_dp->aux,
4333 DP_DEVICE_SERVICE_IRQ_VECTOR,
4334 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004335
4336 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004337 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004338 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4339 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4340 }
4341
Daniel Vetter1ffdff12012-10-18 10:15:24 +02004342 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004343 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004344 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004345 intel_dp_start_link_train(intel_dp);
4346 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004347 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004348 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004349}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004350
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004351/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004352static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004353intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004354{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004355 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004356 uint8_t type;
4357
4358 if (!intel_dp_get_dpcd(intel_dp))
4359 return connector_status_disconnected;
4360
4361 /* if there's no downstream port, we're done */
4362 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004363 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004364
4365 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004366 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4367 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004368 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004369
4370 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4371 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004372 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004373
Adam Jackson23235172012-09-20 16:42:45 -04004374 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4375 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004376 }
4377
4378 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004379 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004380 return connector_status_connected;
4381
4382 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004383 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4384 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4385 if (type == DP_DS_PORT_TYPE_VGA ||
4386 type == DP_DS_PORT_TYPE_NON_EDID)
4387 return connector_status_unknown;
4388 } else {
4389 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4390 DP_DWN_STRM_PORT_TYPE_MASK;
4391 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4392 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4393 return connector_status_unknown;
4394 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004395
4396 /* Anything else is out of spec, warn and ignore */
4397 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004398 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004399}
4400
4401static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004402edp_detect(struct intel_dp *intel_dp)
4403{
4404 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4405 enum drm_connector_status status;
4406
4407 status = intel_panel_detect(dev);
4408 if (status == connector_status_unknown)
4409 status = connector_status_connected;
4410
4411 return status;
4412}
4413
4414static enum drm_connector_status
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004415ironlake_dp_detect(struct intel_dp *intel_dp)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004416{
Paulo Zanoni30add222012-10-26 19:05:45 -02004417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Damien Lespiau1b469632012-12-13 16:09:01 +00004418 struct drm_i915_private *dev_priv = dev->dev_private;
4419 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004420
Damien Lespiau1b469632012-12-13 16:09:01 +00004421 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4422 return connector_status_disconnected;
4423
Keith Packard26d61aa2011-07-25 20:01:09 -07004424 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004425}
4426
Dave Airlie2a592be2014-09-01 16:58:12 +10004427static int g4x_digital_port_connected(struct drm_device *dev,
4428 struct intel_digital_port *intel_dig_port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004429{
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004430 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson10f76a32012-05-11 18:01:32 +01004431 uint32_t bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004432
Todd Previte232a6ee2014-01-23 00:13:41 -07004433 if (IS_VALLEYVIEW(dev)) {
4434 switch (intel_dig_port->port) {
4435 case PORT_B:
4436 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4437 break;
4438 case PORT_C:
4439 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4440 break;
4441 case PORT_D:
4442 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4443 break;
4444 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004445 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004446 }
4447 } else {
4448 switch (intel_dig_port->port) {
4449 case PORT_B:
4450 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4451 break;
4452 case PORT_C:
4453 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4454 break;
4455 case PORT_D:
4456 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4457 break;
4458 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004459 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004460 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004461 }
4462
Chris Wilson10f76a32012-05-11 18:01:32 +01004463 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
Dave Airlie2a592be2014-09-01 16:58:12 +10004464 return 0;
4465 return 1;
4466}
4467
4468static enum drm_connector_status
4469g4x_dp_detect(struct intel_dp *intel_dp)
4470{
4471 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4472 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4473 int ret;
4474
4475 /* Can't disconnect eDP, but you can close the lid... */
4476 if (is_edp(intel_dp)) {
4477 enum drm_connector_status status;
4478
4479 status = intel_panel_detect(dev);
4480 if (status == connector_status_unknown)
4481 status = connector_status_connected;
4482 return status;
4483 }
4484
4485 ret = g4x_digital_port_connected(dev, intel_dig_port);
4486 if (ret == -EINVAL)
4487 return connector_status_unknown;
4488 else if (ret == 0)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004489 return connector_status_disconnected;
4490
Keith Packard26d61aa2011-07-25 20:01:09 -07004491 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004492}
4493
Keith Packard8c241fe2011-09-28 16:38:44 -07004494static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004495intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004496{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004497 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004498
Jani Nikula9cd300e2012-10-19 14:51:52 +03004499 /* use cached edid if we have one */
4500 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004501 /* invalid edid */
4502 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004503 return NULL;
4504
Jani Nikula55e9ede2013-10-01 10:38:54 +03004505 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004506 } else
4507 return drm_get_edid(&intel_connector->base,
4508 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004509}
4510
Chris Wilsonbeb60602014-09-02 20:04:00 +01004511static void
4512intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004513{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004514 struct intel_connector *intel_connector = intel_dp->attached_connector;
4515 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004516
Chris Wilsonbeb60602014-09-02 20:04:00 +01004517 edid = intel_dp_get_edid(intel_dp);
4518 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004519
Chris Wilsonbeb60602014-09-02 20:04:00 +01004520 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4521 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4522 else
4523 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4524}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004525
Chris Wilsonbeb60602014-09-02 20:04:00 +01004526static void
4527intel_dp_unset_edid(struct intel_dp *intel_dp)
4528{
4529 struct intel_connector *intel_connector = intel_dp->attached_connector;
4530
4531 kfree(intel_connector->detect_edid);
4532 intel_connector->detect_edid = NULL;
4533
4534 intel_dp->has_audio = false;
4535}
4536
4537static enum intel_display_power_domain
4538intel_dp_power_get(struct intel_dp *dp)
4539{
4540 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4541 enum intel_display_power_domain power_domain;
4542
4543 power_domain = intel_display_port_power_domain(encoder);
4544 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4545
4546 return power_domain;
4547}
4548
4549static void
4550intel_dp_power_put(struct intel_dp *dp,
4551 enum intel_display_power_domain power_domain)
4552{
4553 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4554 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004555}
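/*
 * Usage sketch: detection paths bracket their AUX/DDC traffic with these
 * two helpers so the port's power domain stays up for the duration, e.g.
 *
 *	enum intel_display_power_domain power_domain;
 *
 *	power_domain = intel_dp_power_get(intel_dp);
 *	intel_dp_set_edid(intel_dp);
 *	intel_dp_power_put(intel_dp, power_domain);
 *
 * which mirrors what intel_dp_detect() and intel_dp_force() below do.
 */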
4556
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004557static enum drm_connector_status
4558intel_dp_detect(struct drm_connector *connector, bool force)
4559{
4560 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4562 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004563 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004564 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004565 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004566 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004567 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004568
Chris Wilson164c8592013-07-20 20:27:08 +01004569 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004570 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004571 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004572
Dave Airlie0e32b392014-05-02 14:02:48 +10004573 if (intel_dp->is_mst) {
4574 /* MST devices are disconnected from a monitor POV */
4575 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4576 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004577 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004578 }
4579
Chris Wilsonbeb60602014-09-02 20:04:00 +01004580 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004581
Chris Wilsond410b562014-09-02 20:03:59 +01004582 /* Can't disconnect eDP, but you can close the lid... */
4583 if (is_edp(intel_dp))
4584 status = edp_detect(intel_dp);
4585 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004586 status = ironlake_dp_detect(intel_dp);
4587 else
4588 status = g4x_dp_detect(intel_dp);
4589 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004590 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004591
Adam Jackson0d198322012-05-14 16:05:47 -04004592 intel_dp_probe_oui(intel_dp);
4593
Dave Airlie0e32b392014-05-02 14:02:48 +10004594 ret = intel_dp_probe_mst(intel_dp);
4595 if (ret) {
4596 /* if we are in MST mode then this connector
 4597 * won't appear connected or have anything with EDID on it */
4598 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4599 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4600 status = connector_status_disconnected;
4601 goto out;
4602 }
4603
Chris Wilsonbeb60602014-09-02 20:04:00 +01004604 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004605
Paulo Zanonid63885d2012-10-26 19:05:49 -02004606 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4607 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004608 status = connector_status_connected;
4609
Todd Previte09b1eb12015-04-20 15:27:34 -07004610 /* Try to read the source of the interrupt */
4611 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4612 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4613 /* Clear interrupt source */
4614 drm_dp_dpcd_writeb(&intel_dp->aux,
4615 DP_DEVICE_SERVICE_IRQ_VECTOR,
4616 sink_irq_vector);
4617
4618 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4619 intel_dp_handle_test_request(intel_dp);
4620 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4621 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4622 }
4623
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004624out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004625 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004626 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004627}
4628
Chris Wilsonbeb60602014-09-02 20:04:00 +01004629static void
4630intel_dp_force(struct drm_connector *connector)
4631{
4632 struct intel_dp *intel_dp = intel_attached_dp(connector);
4633 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4634 enum intel_display_power_domain power_domain;
4635
4636 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4637 connector->base.id, connector->name);
4638 intel_dp_unset_edid(intel_dp);
4639
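	/* Only re-read the EDID if the connector is currently marked connected. */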
4640 if (connector->status != connector_status_connected)
4641 return;
4642
4643 power_domain = intel_dp_power_get(intel_dp);
4644
4645 intel_dp_set_edid(intel_dp);
4646
4647 intel_dp_power_put(intel_dp, power_domain);
4648
4649 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4650 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4651}
4652
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004653static int intel_dp_get_modes(struct drm_connector *connector)
4654{
Jani Nikuladd06f902012-10-19 14:51:50 +03004655 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004656 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004657
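	/* Use the EDID cached by the most recent detect/force cycle. */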
Chris Wilsonbeb60602014-09-02 20:04:00 +01004658 edid = intel_connector->detect_edid;
4659 if (edid) {
4660 int ret = intel_connector_update_modes(connector, edid);
4661 if (ret)
4662 return ret;
4663 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004664
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004665 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004666 if (is_edp(intel_attached_dp(connector)) &&
4667 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004668 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004669
4670 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004671 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004672 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004673 drm_mode_probed_add(connector, mode);
4674 return 1;
4675 }
4676 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004677
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004678 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004679}
4680
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004681static bool
4682intel_dp_detect_audio(struct drm_connector *connector)
4683{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004684 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004685 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004686
Chris Wilsonbeb60602014-09-02 20:04:00 +01004687 edid = to_intel_connector(connector)->detect_edid;
4688 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004689 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004690
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004691 return has_audio;
4692}
4693
Chris Wilsonf6849602010-09-19 09:29:33 +01004694static int
4695intel_dp_set_property(struct drm_connector *connector,
4696 struct drm_property *property,
4697 uint64_t val)
4698{
Chris Wilsone953fd72011-02-21 22:23:52 +00004699 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004700 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004701 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4702 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004703 int ret;
4704
Rob Clark662595d2012-10-11 20:36:04 -05004705 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004706 if (ret)
4707 return ret;
4708
Chris Wilson3f43c482011-05-12 22:17:24 +01004709 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004710 int i = val;
4711 bool has_audio;
4712
4713 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004714 return 0;
4715
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004716 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004717
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004718 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004719 has_audio = intel_dp_detect_audio(connector);
4720 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004721 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004722
4723 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004724 return 0;
4725
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004726 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004727 goto done;
4728 }
4729
Chris Wilsone953fd72011-02-21 22:23:52 +00004730 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004731 bool old_auto = intel_dp->color_range_auto;
4732 uint32_t old_range = intel_dp->color_range;
4733
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004734 switch (val) {
4735 case INTEL_BROADCAST_RGB_AUTO:
4736 intel_dp->color_range_auto = true;
4737 break;
4738 case INTEL_BROADCAST_RGB_FULL:
4739 intel_dp->color_range_auto = false;
4740 intel_dp->color_range = 0;
4741 break;
4742 case INTEL_BROADCAST_RGB_LIMITED:
4743 intel_dp->color_range_auto = false;
4744 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4745 break;
4746 default:
4747 return -EINVAL;
4748 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004749
4750 if (old_auto == intel_dp->color_range_auto &&
4751 old_range == intel_dp->color_range)
4752 return 0;
4753
Chris Wilsone953fd72011-02-21 22:23:52 +00004754 goto done;
4755 }
4756
Yuly Novikov53b41832012-10-26 12:04:00 +03004757 if (is_edp(intel_dp) &&
4758 property == connector->dev->mode_config.scaling_mode_property) {
4759 if (val == DRM_MODE_SCALE_NONE) {
4760 DRM_DEBUG_KMS("no scaling not supported\n");
4761 return -EINVAL;
4762 }
4763
4764 if (intel_connector->panel.fitting_mode == val) {
4765 /* the eDP scaling property is not changed */
4766 return 0;
4767 }
4768 intel_connector->panel.fitting_mode = val;
4769
4770 goto done;
4771 }
4772
Chris Wilsonf6849602010-09-19 09:29:33 +01004773 return -EINVAL;
4774
4775done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004776 if (intel_encoder->base.crtc)
4777 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004778
4779 return 0;
4780}
4781
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004782static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004783intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004784{
Jani Nikula1d508702012-10-19 14:51:49 +03004785 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004786
Chris Wilson10e972d2014-09-04 21:43:45 +01004787 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004788
Jani Nikula9cd300e2012-10-19 14:51:52 +03004789 if (!IS_ERR_OR_NULL(intel_connector->edid))
4790 kfree(intel_connector->edid);
4791
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004792 /* Can't call is_edp() since the encoder may have been destroyed
4793 * already. */
4794 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004795 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004796
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004797 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004798 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004799}
4800
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004801void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004802{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004803 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4804 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004805
Dave Airlie4f71d0c2014-06-04 16:02:28 +10004806 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10004807 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004808 if (is_edp(intel_dp)) {
4809 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004810 /*
 4811		 * vdd might still be enabled due to the delayed vdd off.
4812 * Make sure vdd is actually turned off here.
4813 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004814 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004815 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004816 pps_unlock(intel_dp);
4817
Clint Taylor01527b32014-07-07 13:01:46 -07004818 if (intel_dp->edp_notifier.notifier_call) {
4819 unregister_reboot_notifier(&intel_dp->edp_notifier);
4820 intel_dp->edp_notifier.notifier_call = NULL;
4821 }
Keith Packardbd943152011-09-18 23:09:52 -07004822 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004823 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004824 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004825}
4826
Imre Deak07f9cd02014-08-18 14:42:45 +03004827static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4828{
4829 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4830
4831 if (!is_edp(intel_dp))
4832 return;
4833
Ville Syrjälä951468f2014-09-04 14:55:31 +03004834 /*
 4835	 * vdd might still be enabled due to the delayed vdd off.
4836 * Make sure vdd is actually turned off here.
4837 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004838 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004839 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004840 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004841 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004842}
4843
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004844static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4845{
4846 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4847 struct drm_device *dev = intel_dig_port->base.base.dev;
4848 struct drm_i915_private *dev_priv = dev->dev_private;
4849 enum intel_display_power_domain power_domain;
4850
4851 lockdep_assert_held(&dev_priv->pps_mutex);
4852
4853 if (!edp_have_panel_vdd(intel_dp))
4854 return;
4855
4856 /*
4857 * The VDD bit needs a power domain reference, so if the bit is
4858 * already enabled when we boot or resume, grab this reference and
4859 * schedule a vdd off, so we don't hold on to the reference
4860 * indefinitely.
4861 */
4862 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4863 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4864 intel_display_power_get(dev_priv, power_domain);
4865
4866 edp_panel_vdd_schedule_off(intel_dp);
4867}
4868
Imre Deak6d93c0c2014-07-31 14:03:36 +03004869static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4870{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004871 struct intel_dp *intel_dp;
4872
4873 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4874 return;
4875
4876 intel_dp = enc_to_intel_dp(encoder);
4877
4878 pps_lock(intel_dp);
4879
4880 /*
4881 * Read out the current power sequencer assignment,
4882 * in case the BIOS did something with it.
4883 */
4884 if (IS_VALLEYVIEW(encoder->dev))
4885 vlv_initial_power_sequencer_setup(intel_dp);
4886
4887 intel_edp_panel_vdd_sanitize(intel_dp);
4888
4889 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004890}
4891
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004892static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004893 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004894 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004895 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004896 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004897 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004898 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004899 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004900 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004901 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004902};
4903
4904static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4905 .get_modes = intel_dp_get_modes,
4906 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004907 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004908};
4909
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004910static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004911 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004912 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004913};
4914
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004915enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004916intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4917{
4918 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004919 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004920 struct drm_device *dev = intel_dig_port->base.base.dev;
4921 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004922 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004923 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004924
Dave Airlie0e32b392014-05-02 14:02:48 +10004925 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4926 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004927
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004928 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4929 /*
4930 * vdd off can generate a long pulse on eDP which
4931 * would require vdd on to handle it, and thus we
4932 * would end up in an endless cycle of
4933 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4934 */
4935 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4936 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02004937 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004938 }
4939
Ville Syrjälä26fbb772014-08-11 18:37:37 +03004940 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4941 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10004942 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10004943
Imre Deak1c767b32014-08-18 14:42:42 +03004944 power_domain = intel_display_port_power_domain(intel_encoder);
4945 intel_display_power_get(dev_priv, power_domain);
4946
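	/*
	 * A long pulse indicates the sink may have been plugged or
	 * unplugged, so re-probe it from scratch; a short pulse is a
	 * sink-generated IRQ that only needs servicing.
	 */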
Dave Airlie0e32b392014-05-02 14:02:48 +10004947 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03004948 /* indicate that we need to restart link training */
4949 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10004950
4951 if (HAS_PCH_SPLIT(dev)) {
4952 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4953 goto mst_fail;
4954 } else {
4955 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4956 goto mst_fail;
4957 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004958
4959 if (!intel_dp_get_dpcd(intel_dp)) {
4960 goto mst_fail;
4961 }
4962
4963 intel_dp_probe_oui(intel_dp);
4964
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004965 if (!intel_dp_probe_mst(intel_dp)) {
4966 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4967 intel_dp_check_link_status(intel_dp);
4968 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004969 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004970 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004971 } else {
4972 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03004973 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10004974 goto mst_fail;
4975 }
4976
4977 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10004978 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10004979 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10004980 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004981 }
4982 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004983
4984 ret = IRQ_HANDLED;
4985
Imre Deak1c767b32014-08-18 14:42:42 +03004986 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10004987mst_fail:
 4988	/* if we were in MST mode and the device is no longer there, get out of MST mode */
4989 if (intel_dp->is_mst) {
4990 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4991 intel_dp->is_mst = false;
4992 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4993 }
Imre Deak1c767b32014-08-18 14:42:42 +03004994put_power:
4995 intel_display_power_put(dev_priv, power_domain);
4996
4997 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10004998}
4999
Zhenyu Wange3421a12010-04-08 09:43:27 +08005000/* Return which DP Port should be selected for Transcoder DP control */
5001int
Akshay Joshi0206e352011-08-16 15:34:10 -04005002intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08005003{
5004 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005005 struct intel_encoder *intel_encoder;
5006 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005007
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005008 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5009 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01005010
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005011 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5012 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01005013 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005014 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01005015
Zhenyu Wange3421a12010-04-08 09:43:27 +08005016 return -1;
5017}
5018
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005019/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005020bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005021{
5022 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005023 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005024 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005025 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005026 [PORT_B] = DVO_PORT_DPB,
5027 [PORT_C] = DVO_PORT_DPC,
5028 [PORT_D] = DVO_PORT_DPD,
5029 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005030 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005031
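	/* DP on port A is always eDP. */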
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005032 if (port == PORT_A)
5033 return true;
5034
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005035 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005036 return false;
5037
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005038 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5039 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005040
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005041 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005042 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5043 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005044 return true;
5045 }
5046 return false;
5047}
5048
Dave Airlie0e32b392014-05-02 14:02:48 +10005049void
Chris Wilsonf6849602010-09-19 09:29:33 +01005050intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5051{
Yuly Novikov53b41832012-10-26 12:04:00 +03005052 struct intel_connector *intel_connector = to_intel_connector(connector);
5053
Chris Wilson3f43c482011-05-12 22:17:24 +01005054 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005055 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005056 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005057
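	/*
	 * eDP panels additionally get a scaling mode property, which
	 * defaults to aspect-preserving scaling.
	 */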
5058 if (is_edp(intel_dp)) {
5059 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005060 drm_object_attach_property(
5061 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005062 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005063 DRM_MODE_SCALE_ASPECT);
5064 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005065 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005066}
5067
Imre Deakdada1a92014-01-29 13:25:41 +02005068static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5069{
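	/* Seed the timestamps used to enforce the panel power sequencing delays. */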
5070 intel_dp->last_power_cycle = jiffies;
5071 intel_dp->last_power_on = jiffies;
5072 intel_dp->last_backlight_off = jiffies;
5073}
5074
Daniel Vetter67a54562012-10-20 20:57:45 +02005075static void
5076intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005077 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005078{
5079 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005080 struct edp_power_seq cur, vbt, spec,
5081 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305082 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5083 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005084
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005085 lockdep_assert_held(&dev_priv->pps_mutex);
5086
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005087 /* already initialized? */
5088 if (final->t11_t12 != 0)
5089 return;
5090
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305091 if (IS_BROXTON(dev)) {
5092 /*
5093 * TODO: BXT has 2 sets of PPS registers.
 5094		 * The correct register set for Broxton needs to be identified
 5095		 * using the VBT; hardcoded for now.
5096 */
5097 pp_ctrl_reg = BXT_PP_CONTROL(0);
5098 pp_on_reg = BXT_PP_ON_DELAYS(0);
5099 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5100 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005101 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005102 pp_on_reg = PCH_PP_ON_DELAYS;
5103 pp_off_reg = PCH_PP_OFF_DELAYS;
5104 pp_div_reg = PCH_PP_DIVISOR;
5105 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005106 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5107
5108 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5109 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5110 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5111 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005112 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005113
5114 /* Workaround: Need to write PP_CONTROL with the unlock key as
5115 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305116 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005117
Jesse Barnes453c5422013-03-28 09:55:41 -07005118 pp_on = I915_READ(pp_on_reg);
5119 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305120 if (!IS_BROXTON(dev)) {
5121 I915_WRITE(pp_ctrl_reg, pp_ctl);
5122 pp_div = I915_READ(pp_div_reg);
5123 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005124
5125 /* Pull timing values out of registers */
5126 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5127 PANEL_POWER_UP_DELAY_SHIFT;
5128
5129 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5130 PANEL_LIGHT_ON_DELAY_SHIFT;
5131
5132 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5133 PANEL_LIGHT_OFF_DELAY_SHIFT;
5134
5135 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5136 PANEL_POWER_DOWN_DELAY_SHIFT;
5137
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305138 if (IS_BROXTON(dev)) {
5139 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5140 BXT_POWER_CYCLE_DELAY_SHIFT;
5141 if (tmp > 0)
5142 cur.t11_t12 = (tmp - 1) * 1000;
5143 else
5144 cur.t11_t12 = 0;
5145 } else {
5146 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005147 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305148 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005149
5150 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5151 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5152
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005153 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005154
5155 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5156 * our hw here, which are all in 100usec. */
5157 spec.t1_t3 = 210 * 10;
5158 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5159 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5160 spec.t10 = 500 * 10;
5161 /* This one is special and actually in units of 100ms, but zero
5162 * based in the hw (so we need to add 100 ms). But the sw vbt
 5163	 * table multiplies it by 1000 to make it in units of 100usec,
5164 * too. */
5165 spec.t11_t12 = (510 + 100) * 10;
5166
5167 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5168 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5169
5170 /* Use the max of the register settings and vbt. If both are
5171 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005172#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005173 spec.field : \
5174 max(cur.field, vbt.field))
5175 assign_final(t1_t3);
5176 assign_final(t8);
5177 assign_final(t9);
5178 assign_final(t10);
5179 assign_final(t11_t12);
5180#undef assign_final
5181
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005182#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005183 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5184 intel_dp->backlight_on_delay = get_delay(t8);
5185 intel_dp->backlight_off_delay = get_delay(t9);
5186 intel_dp->panel_power_down_delay = get_delay(t10);
5187 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5188#undef get_delay
5189
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005190 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5191 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5192 intel_dp->panel_power_cycle_delay);
5193
5194 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5195 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005196}
5197
5198static void
5199intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005200 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005201{
5202 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005203 u32 pp_on, pp_off, pp_div, port_sel = 0;
5204 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305205 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005206 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005207 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005208
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005209 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005210
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305211 if (IS_BROXTON(dev)) {
5212 /*
5213 * TODO: BXT has 2 sets of PPS registers.
 5214		 * The correct register set for Broxton needs to be identified
 5215		 * using the VBT; hardcoded for now.
5216 */
5217 pp_ctrl_reg = BXT_PP_CONTROL(0);
5218 pp_on_reg = BXT_PP_ON_DELAYS(0);
5219 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5220
5221 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005222 pp_on_reg = PCH_PP_ON_DELAYS;
5223 pp_off_reg = PCH_PP_OFF_DELAYS;
5224 pp_div_reg = PCH_PP_DIVISOR;
5225 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005226 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5227
5228 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5229 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5230 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005231 }
5232
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005233 /*
5234 * And finally store the new values in the power sequencer. The
5235 * backlight delays are set to 1 because we do manual waits on them. For
5236 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5237 * we'll end up waiting for the backlight off delay twice: once when we
5238 * do the manual sleep, and once when we disable the panel and wait for
5239 * the PP_STATUS bit to become zero.
5240 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005241 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005242 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5243 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005244 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005245 /* Compute the divisor for the pp clock, simply match the Bspec
5246 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305247 if (IS_BROXTON(dev)) {
5248 pp_div = I915_READ(pp_ctrl_reg);
5249 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5250 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5251 << BXT_POWER_CYCLE_DELAY_SHIFT);
5252 } else {
5253 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5254 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5255 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5256 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005257
5258 /* Haswell doesn't have any port selection bits for the panel
5259 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005260 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005261 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005262 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005263 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005264 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005265 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005266 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005267 }
5268
Jesse Barnes453c5422013-03-28 09:55:41 -07005269 pp_on |= port_sel;
5270
5271 I915_WRITE(pp_on_reg, pp_on);
5272 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305273 if (IS_BROXTON(dev))
5274 I915_WRITE(pp_ctrl_reg, pp_div);
5275 else
5276 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005277
Daniel Vetter67a54562012-10-20 20:57:45 +02005278 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005279 I915_READ(pp_on_reg),
5280 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305281 IS_BROXTON(dev) ?
5282 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005283 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005284}
5285
Vandana Kannanb33a2812015-02-13 15:33:03 +05305286/**
5287 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5288 * @dev: DRM device
5289 * @refresh_rate: RR to be programmed
5290 *
5291 * This function gets called when refresh rate (RR) has to be changed from
5292 * one frequency to another. Switches can be between high and low RR
5293 * supported by the panel or to any other RR based on media playback (in
5294 * this case, RR value needs to be passed from user space).
5295 *
5296 * The caller of this function needs to take a lock on dev_priv->drrs.
5297 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305298static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305299{
5300 struct drm_i915_private *dev_priv = dev->dev_private;
5301 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305302 struct intel_digital_port *dig_port = NULL;
5303 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005304 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305305 struct intel_crtc *intel_crtc = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305306 u32 reg, val;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305307 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305308
5309 if (refresh_rate <= 0) {
5310 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5311 return;
5312 }
5313
Vandana Kannan96178ee2015-01-10 02:25:56 +05305314 if (intel_dp == NULL) {
5315 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305316 return;
5317 }
5318
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005319 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005320 * FIXME: This needs proper synchronization with psr state for some
5321 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005322 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305323
Vandana Kannan96178ee2015-01-10 02:25:56 +05305324 dig_port = dp_to_dig_port(intel_dp);
5325 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005326 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305327
5328 if (!intel_crtc) {
5329 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5330 return;
5331 }
5332
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005333 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305334
Vandana Kannan96178ee2015-01-10 02:25:56 +05305335 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305336 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5337 return;
5338 }
5339
Vandana Kannan96178ee2015-01-10 02:25:56 +05305340 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5341 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305342 index = DRRS_LOW_RR;
5343
Vandana Kannan96178ee2015-01-10 02:25:56 +05305344 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305345 DRM_DEBUG_KMS(
5346 "DRRS requested for previously set RR...ignoring\n");
5347 return;
5348 }
5349
5350 if (!intel_crtc->active) {
5351 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5352 return;
5353 }
5354
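	/*
	 * Gen8+ (except CHV) switch the refresh rate by selecting between
	 * the M1/N1 and M2/N2 link divider sets; older platforms toggle
	 * the PIPECONF EDP_RR_MODE_SWITCH bit instead.
	 */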
Durgadoss R44395bf2015-02-13 15:33:02 +05305355 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305356 switch (index) {
5357 case DRRS_HIGH_RR:
5358 intel_dp_set_m_n(intel_crtc, M1_N1);
5359 break;
5360 case DRRS_LOW_RR:
5361 intel_dp_set_m_n(intel_crtc, M2_N2);
5362 break;
5363 case DRRS_MAX_RR:
5364 default:
5365 DRM_ERROR("Unsupported refreshrate type\n");
5366 }
5367 } else if (INTEL_INFO(dev)->gen > 6) {
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005368 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305369 val = I915_READ(reg);
Vandana Kannana4c30b12015-02-13 15:33:00 +05305370
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305371 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305372 if (IS_VALLEYVIEW(dev))
5373 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5374 else
5375 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305376 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305377 if (IS_VALLEYVIEW(dev))
5378 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5379 else
5380 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305381 }
5382 I915_WRITE(reg, val);
5383 }
5384
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305385 dev_priv->drrs.refresh_rate_type = index;
5386
5387 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5388}
5389
Vandana Kannanb33a2812015-02-13 15:33:03 +05305390/**
5391 * intel_edp_drrs_enable - init drrs struct if supported
5392 * @intel_dp: DP struct
5393 *
5394 * Initializes frontbuffer_bits and drrs.dp
5395 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305396void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5397{
5398 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5399 struct drm_i915_private *dev_priv = dev->dev_private;
5400 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5401 struct drm_crtc *crtc = dig_port->base.base.crtc;
5402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5403
5404 if (!intel_crtc->config->has_drrs) {
5405 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5406 return;
5407 }
5408
5409 mutex_lock(&dev_priv->drrs.mutex);
5410 if (WARN_ON(dev_priv->drrs.dp)) {
5411 DRM_ERROR("DRRS already enabled\n");
5412 goto unlock;
5413 }
5414
5415 dev_priv->drrs.busy_frontbuffer_bits = 0;
5416
5417 dev_priv->drrs.dp = intel_dp;
5418
5419unlock:
5420 mutex_unlock(&dev_priv->drrs.mutex);
5421}
5422
Vandana Kannanb33a2812015-02-13 15:33:03 +05305423/**
5424 * intel_edp_drrs_disable - Disable DRRS
5425 * @intel_dp: DP struct
5426 *
5427 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305428void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5429{
5430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5431 struct drm_i915_private *dev_priv = dev->dev_private;
5432 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5433 struct drm_crtc *crtc = dig_port->base.base.crtc;
5434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5435
5436 if (!intel_crtc->config->has_drrs)
5437 return;
5438
5439 mutex_lock(&dev_priv->drrs.mutex);
5440 if (!dev_priv->drrs.dp) {
5441 mutex_unlock(&dev_priv->drrs.mutex);
5442 return;
5443 }
5444
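	/* Switch back to the fixed (high) refresh rate before tearing down. */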
5445 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5446 intel_dp_set_drrs_state(dev_priv->dev,
5447 intel_dp->attached_connector->panel.
5448 fixed_mode->vrefresh);
5449
5450 dev_priv->drrs.dp = NULL;
5451 mutex_unlock(&dev_priv->drrs.mutex);
5452
5453 cancel_delayed_work_sync(&dev_priv->drrs.work);
5454}
5455
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305456static void intel_edp_drrs_downclock_work(struct work_struct *work)
5457{
5458 struct drm_i915_private *dev_priv =
5459 container_of(work, typeof(*dev_priv), drrs.work.work);
5460 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305461
Vandana Kannan96178ee2015-01-10 02:25:56 +05305462 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305463
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305464 intel_dp = dev_priv->drrs.dp;
5465
5466 if (!intel_dp)
5467 goto unlock;
5468
5469 /*
5470 * The delayed work can race with an invalidate hence we need to
5471 * recheck.
5472 */
5473
5474 if (dev_priv->drrs.busy_frontbuffer_bits)
5475 goto unlock;
5476
5477 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5478 intel_dp_set_drrs_state(dev_priv->dev,
5479 intel_dp->attached_connector->panel.
5480 downclock_mode->vrefresh);
5481
5482unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305483 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305484}
5485
Vandana Kannanb33a2812015-02-13 15:33:03 +05305486/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305487 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305488 * @dev: DRM device
5489 * @frontbuffer_bits: frontbuffer plane tracking bits
5490 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305491 * This function gets called every time rendering on the given planes starts.
5492 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305493 *
5494 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5495 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305496void intel_edp_drrs_invalidate(struct drm_device *dev,
5497 unsigned frontbuffer_bits)
5498{
5499 struct drm_i915_private *dev_priv = dev->dev_private;
5500 struct drm_crtc *crtc;
5501 enum pipe pipe;
5502
Daniel Vetter9da7d692015-04-09 16:44:15 +02005503 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305504 return;
5505
Daniel Vetter88f933a2015-04-09 16:44:16 +02005506 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305507
Vandana Kannana93fad02015-01-10 02:25:59 +05305508 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005509 if (!dev_priv->drrs.dp) {
5510 mutex_unlock(&dev_priv->drrs.mutex);
5511 return;
5512 }
5513
Vandana Kannana93fad02015-01-10 02:25:59 +05305514 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5515 pipe = to_intel_crtc(crtc)->pipe;
5516
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005517 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5518 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5519
Ramalingam C0ddfd202015-06-15 20:50:05 +05305520 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005521 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305522 intel_dp_set_drrs_state(dev_priv->dev,
5523 dev_priv->drrs.dp->attached_connector->panel.
5524 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305525
Vandana Kannana93fad02015-01-10 02:25:59 +05305526 mutex_unlock(&dev_priv->drrs.mutex);
5527}
5528
Vandana Kannanb33a2812015-02-13 15:33:03 +05305529/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305530 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305531 * @dev: DRM device
5532 * @frontbuffer_bits: frontbuffer plane tracking bits
5533 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305534 * This function gets called every time rendering on the given planes has
 5535 * completed or a flip on a crtc has completed, so DRRS should be upclocked
 5536 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted if no
 5537 * other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305538 *
5539 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5540 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305541void intel_edp_drrs_flush(struct drm_device *dev,
5542 unsigned frontbuffer_bits)
5543{
5544 struct drm_i915_private *dev_priv = dev->dev_private;
5545 struct drm_crtc *crtc;
5546 enum pipe pipe;
5547
Daniel Vetter9da7d692015-04-09 16:44:15 +02005548 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305549 return;
5550
Daniel Vetter88f933a2015-04-09 16:44:16 +02005551 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305552
Vandana Kannana93fad02015-01-10 02:25:59 +05305553 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005554 if (!dev_priv->drrs.dp) {
5555 mutex_unlock(&dev_priv->drrs.mutex);
5556 return;
5557 }
5558
Vandana Kannana93fad02015-01-10 02:25:59 +05305559 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5560 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005561
5562 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305563 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5564
Ramalingam C0ddfd202015-06-15 20:50:05 +05305565 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005566 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305567 intel_dp_set_drrs_state(dev_priv->dev,
5568 dev_priv->drrs.dp->attached_connector->panel.
5569 fixed_mode->vrefresh);
5570
5571 /*
5572 * flush also means no more activity hence schedule downclock, if all
5573 * other fbs are quiescent too
5574 */
5575 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305576 schedule_delayed_work(&dev_priv->drrs.work,
5577 msecs_to_jiffies(1000));
5578 mutex_unlock(&dev_priv->drrs.mutex);
5579}
5580
Vandana Kannanb33a2812015-02-13 15:33:03 +05305581/**
5582 * DOC: Display Refresh Rate Switching (DRRS)
5583 *
5584 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5585 * which enables switching between low and high refresh rates,
5586 * dynamically, based on the usage scenario. This feature is applicable
5587 * for internal panels.
5588 *
5589 * Indication that the panel supports DRRS is given by the panel EDID, which
5590 * would list multiple refresh rates for one resolution.
5591 *
5592 * DRRS is of 2 types - static and seamless.
5593 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5594 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5595 * Seamless DRRS involves changing RR without any visual effect to the user
5596 * and can be used during normal system usage. This is done by programming
5597 * certain registers.
5598 *
5599 * Support for static/seamless DRRS may be indicated in the VBT based on
5600 * inputs from the panel spec.
5601 *
5602 * DRRS saves power by switching to low RR based on usage scenarios.
5603 *
5604 * eDP DRRS:-
5605 * The implementation is based on frontbuffer tracking implementation.
5606 * When there is a disturbance on the screen triggered by user activity or a
5607 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5608 * When there is no movement on screen, after a timeout of 1 second, a switch
5609 * to low RR is made.
5610 * For integration with frontbuffer tracking code,
5611 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5612 *
5613 * DRRS can be further extended to support other internal panels and also
5614 * the scenario of video playback wherein RR is set based on the rate
5615 * requested by userspace.
5616 */
5617
5618/**
5619 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5620 * @intel_connector: eDP connector
5621 * @fixed_mode: preferred mode of panel
5622 *
5623 * This function is called only once at driver load to initialize basic
5624 * DRRS stuff.
5625 *
5626 * Returns:
5627 * Downclock mode if panel supports it, else return NULL.
5628 * DRRS support is determined by the presence of downclock mode (apart
5629 * from VBT setting).
5630 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305631static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305632intel_dp_drrs_init(struct intel_connector *intel_connector,
5633 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305634{
5635 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305636 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305637 struct drm_i915_private *dev_priv = dev->dev_private;
5638 struct drm_display_mode *downclock_mode = NULL;
5639
Daniel Vetter9da7d692015-04-09 16:44:15 +02005640 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5641 mutex_init(&dev_priv->drrs.mutex);
5642
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305643 if (INTEL_INFO(dev)->gen <= 6) {
5644 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5645 return NULL;
5646 }
5647
5648 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005649 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305650 return NULL;
5651 }
5652
5653 downclock_mode = intel_find_panel_downclock
5654 (dev, fixed_mode, connector);
5655
5656 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305657 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305658 return NULL;
5659 }
5660
Vandana Kannan96178ee2015-01-10 02:25:56 +05305661 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305662
Vandana Kannan96178ee2015-01-10 02:25:56 +05305663 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005664 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305665 return downclock_mode;
5666}
5667
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005668static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005669 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005670{
5671 struct drm_connector *connector = &intel_connector->base;
5672 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005673 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5674 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005675 struct drm_i915_private *dev_priv = dev->dev_private;
5676 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305677 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005678 bool has_dpcd;
5679 struct drm_display_mode *scan;
5680 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005681 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005682
5683 if (!is_edp(intel_dp))
5684 return true;
5685
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005686 pps_lock(intel_dp);
5687 intel_edp_panel_vdd_sanitize(intel_dp);
5688 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005689
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005690 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005691 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005692
5693 if (has_dpcd) {
5694 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5695 dev_priv->no_aux_handshake =
5696 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5697 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5698 } else {
5699 /* if this fails, presume the device is a ghost */
5700 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005701 return false;
5702 }
5703
5704 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005705 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005706 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005707 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005708
Daniel Vetter060c8772014-03-21 23:22:35 +01005709 mutex_lock(&dev->mode_config.mutex);
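	/*
	 * Read the panel EDID over the AUX DDC channel and cache it for
	 * the lifetime of the connector.
	 */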
Jani Nikula0b998362014-03-14 16:51:17 +02005710 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005711 if (edid) {
5712 if (drm_add_edid_modes(connector, edid)) {
5713 drm_mode_connector_update_edid_property(connector,
5714 edid);
5715 drm_edid_to_eld(connector, edid);
5716 } else {
5717 kfree(edid);
5718 edid = ERR_PTR(-EINVAL);
5719 }
5720 } else {
5721 edid = ERR_PTR(-ENOENT);
5722 }
5723 intel_connector->edid = edid;
5724
5725 /* prefer fixed mode from EDID if available */
5726 list_for_each_entry(scan, &connector->probed_modes, head) {
5727 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5728 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305729 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305730 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005731 break;
5732 }
5733 }
5734
5735 /* fallback to VBT if available for eDP */
5736 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5737 fixed_mode = drm_mode_duplicate(dev,
5738 dev_priv->vbt.lfp_lvds_vbt_mode);
5739 if (fixed_mode)
5740 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5741 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005742 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005743
Clint Taylor01527b32014-07-07 13:01:46 -07005744 if (IS_VALLEYVIEW(dev)) {
5745 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5746 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005747
5748 /*
5749 * Figure out the current pipe for the initial backlight setup.
5750 * If the current pipe isn't valid, try the PPS pipe, and if that
5751 * fails just assume pipe A.
5752 */
5753 if (IS_CHERRYVIEW(dev))
5754 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5755 else
5756 pipe = PORT_TO_PIPE(intel_dp->DP);
5757
5758 if (pipe != PIPE_A && pipe != PIPE_B)
5759 pipe = intel_dp->pps_pipe;
5760
5761 if (pipe != PIPE_A && pipe != PIPE_B)
5762 pipe = PIPE_A;
5763
5764 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5765 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005766 }
5767
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305768 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula73580fb72014-08-12 17:11:41 +03005769 intel_connector->panel.backlight_power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005770 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005771
5772 return true;
5773}
5774
Paulo Zanoni16c25532013-06-12 17:27:25 -03005775bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005776intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5777 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005778{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005779 struct drm_connector *connector = &intel_connector->base;
5780 struct intel_dp *intel_dp = &intel_dig_port->dp;
5781 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5782 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005783 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005784 enum port port = intel_dig_port->port;
Jani Nikula0b998362014-03-14 16:51:17 +02005785 int type;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005786
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005787 intel_dp->pps_pipe = INVALID_PIPE;
5788
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005789 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005790 if (INTEL_INFO(dev)->gen >= 9)
5791 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5792 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005793 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5794 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5795 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5796 else if (HAS_PCH_SPLIT(dev))
5797 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5798 else
5799 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5800
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005801 if (INTEL_INFO(dev)->gen >= 9)
5802 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5803 else
5804 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005805
Daniel Vetter07679352012-09-06 22:15:42 +02005806 /* Preserve the current hw state. */
5807 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005808 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005809
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005810 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305811 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005812 else
5813 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005814
Imre Deakf7d24902013-05-08 13:14:05 +03005815 /*
5816 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5817 * for DP the encoder type can be set by the caller to
5818 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5819 */
5820 if (type == DRM_MODE_CONNECTOR_eDP)
5821 intel_encoder->type = INTEL_OUTPUT_EDP;
5822
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005823 /* eDP only on port B and/or C on vlv/chv */
5824 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5825 port != PORT_B && port != PORT_C))
5826 return false;
5827
Imre Deake7281ea2013-05-08 13:14:08 +03005828 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5829 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5830 port_name(port));
5831
Adam Jacksonb3295302010-07-16 14:46:28 -04005832 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005833 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5834
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005835 connector->interlace_allowed = true;
5836 connector->doublescan_allowed = 0;
Ma Lingf8aed702009-08-24 13:50:24 +08005837
Daniel Vetter66a92782012-07-12 20:08:18 +02005838 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
Daniel Vetter4be73782014-01-17 14:39:48 +01005839 edp_panel_vdd_work);
Zhenyu Wang6251ec02010-01-12 05:38:32 +08005840
Chris Wilsondf0e9242010-09-09 16:20:55 +01005841 intel_connector_attach_encoder(intel_connector, intel_encoder);
Thomas Wood34ea3d32014-05-29 16:57:41 +01005842 drm_connector_register(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005843
Paulo Zanoniaffa9352012-11-23 15:30:39 -02005844 if (HAS_DDI(dev))
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005845 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5846 else
5847 intel_connector->get_hw_state = intel_connector_get_hw_state;
Imre Deak80f65de2014-02-11 17:12:49 +02005848 intel_connector->unregister = intel_dp_connector_unregister;
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005849
Jani Nikula0b998362014-03-14 16:51:17 +02005850 /* Set up the hotplug pin. */
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005851 switch (port) {
5852 case PORT_A:
Egbert Eich1d843f92013-02-25 12:06:49 -05005853 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005854 break;
5855 case PORT_B:
Egbert Eich1d843f92013-02-25 12:06:49 -05005856 intel_encoder->hpd_pin = HPD_PORT_B;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005857 break;
5858 case PORT_C:
Egbert Eich1d843f92013-02-25 12:06:49 -05005859 intel_encoder->hpd_pin = HPD_PORT_C;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005860 break;
5861 case PORT_D:
Egbert Eich1d843f92013-02-25 12:06:49 -05005862 intel_encoder->hpd_pin = HPD_PORT_D;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005863 break;
Xiong Zhang26951ca2015-08-17 15:55:50 +08005864 case PORT_E:
5865 intel_encoder->hpd_pin = HPD_PORT_E;
5866 break;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005867 default:
Damien Lespiauad1c0b12013-03-07 15:30:28 +00005868 BUG();
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08005869 }
5870
Imre Deakdada1a92014-01-29 13:25:41 +02005871 if (is_edp(intel_dp)) {
Ville Syrjälä773538e82014-09-04 14:54:56 +03005872 pps_lock(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005873 intel_dp_init_panel_power_timestamps(intel_dp);
5874 if (IS_VALLEYVIEW(dev))
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005875 vlv_initial_power_sequencer_setup(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005876 else
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005877 intel_dp_init_panel_power_sequencer(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005878 pps_unlock(intel_dp);
Imre Deakdada1a92014-01-29 13:25:41 +02005879 }
Paulo Zanoni0095e6d2013-12-19 14:29:39 -02005880
Jani Nikula9d1a1032014-03-14 16:51:15 +02005881 intel_dp_aux_init(intel_dp, intel_connector);
Dave Airliec1f05262012-08-30 11:06:18 +10005882
Dave Airlie0e32b392014-05-02 14:02:48 +10005883 /* init MST on ports that can support it */
Jani Nikula0c9b3712015-05-18 17:10:01 +03005884 if (HAS_DP_MST(dev) &&
5885 (port == PORT_B || port == PORT_C || port == PORT_D))
5886 intel_dp_mst_encoder_init(intel_dig_port,
5887 intel_connector->base.base.id);
Dave Airlie0e32b392014-05-02 14:02:48 +10005888
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005889 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Dave Airlie4f71d0c2014-06-04 16:02:28 +10005890 drm_dp_aux_unregister(&intel_dp->aux);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005891 if (is_edp(intel_dp)) {
5892 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03005893 /*
5894 * vdd might still be enabled due to the delayed vdd off.
5895 * Make sure vdd is actually turned off here.
5896 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005897 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01005898 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005899 pps_unlock(intel_dp);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005900 }
Thomas Wood34ea3d32014-05-29 16:57:41 +01005901 drm_connector_unregister(connector);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005902 drm_connector_cleanup(connector);
Paulo Zanoni16c25532013-06-12 17:27:25 -03005903 return false;
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005904 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08005905
Chris Wilsonf6849602010-09-19 09:29:33 +01005906 intel_dp_add_properties(intel_dp, connector);
5907
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005908 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written with
5909 * 0xd. Failure to do so will result in spurious interrupts being
5910 * generated on the port when a cable is not attached.
5911 */
5912 if (IS_G4X(dev) && !IS_GM45(dev)) {
5913 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5914 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5915 }
Paulo Zanoni16c25532013-06-12 17:27:25 -03005916
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005917 i915_debugfs_connector_add(connector);
5918
Paulo Zanoni16c25532013-06-12 17:27:25 -03005919 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005920}
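/*
 * Illustrative sketch (simplified, not the driver's exact code): how the
 * per-platform AUX vfuncs wired up in intel_dp_init_connector() above are
 * consumed by the AUX transfer path.  The clock-divider hook is iterated
 * until it returns 0, and each candidate divider is turned into a
 * DP_AUX_CH_CTL value by the send-ctl hook; the retry, timeout and IRQ
 * handling of the real intel_dp_aux_ch() is omitted.  The helper name is
 * hypothetical, and the vfunc argument lists follow the prototypes assumed
 * from this vintage of intel_drv.h.
 */
static void intel_dp_aux_send_sketch(struct intel_dp *intel_dp,
				     bool has_aux_irq, int send_bytes)
{
	uint32_t aux_clock_divider;
	int clock = 0;

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp,
								    clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/*
		 * Here the real code programs DP_AUX_CH_CTL with send_ctl,
		 * starts the transfer and retries on failure; that part is
		 * intentionally left out of this sketch.
		 */
		(void)send_ctl;
	}
}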
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005921
5922void
5923intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5924{
Dave Airlie13cf5502014-06-18 11:29:35 +10005925 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005926 struct intel_digital_port *intel_dig_port;
5927 struct intel_encoder *intel_encoder;
5928 struct drm_encoder *encoder;
5929 struct intel_connector *intel_connector;
5930
Daniel Vetterb14c5672013-09-19 12:18:32 +02005931 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005932 if (!intel_dig_port)
5933 return;
5934
Ander Conselvan de Oliveira08d9bc92015-04-10 10:59:10 +03005935 intel_connector = intel_connector_alloc();
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005936 if (!intel_connector) {
5937 kfree(intel_dig_port);
5938 return;
5939 }
5940
5941 intel_encoder = &intel_dig_port->base;
5942 encoder = &intel_encoder->base;
5943
5944 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5945 DRM_MODE_ENCODER_TMDS);
5946
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01005947 intel_encoder->compute_config = intel_dp_compute_config;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005948 intel_encoder->disable = intel_disable_dp;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005949 intel_encoder->get_hw_state = intel_dp_get_hw_state;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07005950 intel_encoder->get_config = intel_dp_get_config;
Imre Deak07f9cd02014-08-18 14:42:45 +03005951 intel_encoder->suspend = intel_dp_encoder_suspend;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005952 if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä9197c882014-04-09 13:29:05 +03005953 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005954 intel_encoder->pre_enable = chv_pre_enable_dp;
5955 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä580d3812014-04-09 13:29:00 +03005956 intel_encoder->post_disable = chv_post_disable_dp;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005957 } else if (IS_VALLEYVIEW(dev)) {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005958 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005959 intel_encoder->pre_enable = vlv_pre_enable_dp;
5960 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä49277c32014-03-31 18:21:26 +03005961 intel_encoder->post_disable = vlv_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005962 } else {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005963 intel_encoder->pre_enable = g4x_pre_enable_dp;
5964 intel_encoder->enable = g4x_enable_dp;
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03005965 if (INTEL_INFO(dev)->gen >= 5)
5966 intel_encoder->post_disable = ilk_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005967 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005968
Paulo Zanoni174edf12012-10-26 19:05:50 -02005969 intel_dig_port->port = port;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005970 intel_dig_port->dp.output_reg = output_reg;
5971
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005972 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Ville Syrjälä882ec382014-04-28 14:07:43 +03005973 if (IS_CHERRYVIEW(dev)) {
5974 if (port == PORT_D)
5975 intel_encoder->crtc_mask = 1 << 2;
5976 else
5977 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5978 } else {
5979 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5980 }
Ville Syrjäläbc079e82014-03-03 16:15:28 +02005981 intel_encoder->cloneable = 0;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005982
Dave Airlie13cf5502014-06-18 11:29:35 +10005983 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
Jani Nikula5fcece82015-05-27 15:03:42 +03005984 dev_priv->hotplug.irq_port[port] = intel_dig_port;
Dave Airlie13cf5502014-06-18 11:29:35 +10005985
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005986 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5987 drm_encoder_cleanup(encoder);
5988 kfree(intel_dig_port);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005989 kfree(intel_connector);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005990 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005991}
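/*
 * Illustrative sketch (abridged): roughly how the platform output setup in
 * intel_display.c probes the "port detected" strap bit before registering a
 * g4x-style DP port via intel_dp_init() above.  DP_B/DP_C/DP_D and
 * DP_DETECTED come from i915_reg.h; the PCH and VLV/CHV variants and the
 * surrounding checks are omitted, and the helper name is hypothetical.
 */
static void intel_dp_setup_outputs_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(DP_B) & DP_DETECTED)
		intel_dp_init(dev, DP_B, PORT_B);
	if (I915_READ(DP_C) & DP_DETECTED)
		intel_dp_init(dev, DP_C, PORT_C);
	if (I915_READ(DP_D) & DP_DETECTED)
		intel_dp_init(dev, DP_D, PORT_D);
}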
Dave Airlie0e32b392014-05-02 14:02:48 +10005992
5993void intel_dp_mst_suspend(struct drm_device *dev)
5994{
5995 struct drm_i915_private *dev_priv = dev->dev_private;
5996 int i;
5997
5998 /* disable MST */
5999 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006000 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006001 if (!intel_dig_port)
6002 continue;
6003
6004 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6005 if (!intel_dig_port->dp.can_mst)
6006 continue;
6007 if (intel_dig_port->dp.is_mst)
6008 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6009 }
6010 }
6011}
6012
6013void intel_dp_mst_resume(struct drm_device *dev)
6014{
6015 struct drm_i915_private *dev_priv = dev->dev_private;
6016 int i;
6017
6018 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006019 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006020 if (!intel_dig_port)
6021 continue;
6022 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6023 int ret;
6024
6025 if (!intel_dig_port->dp.can_mst)
6026 continue;
6027
6028 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6029 if (ret != 0) {
6030 intel_dp_check_mst_status(&intel_dig_port->dp);
6031 }
6032 }
6033 }
6034}
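/*
 * Illustrative sketch (simplified): where the MST suspend/resume helpers
 * above are expected to be called from the driver's system PM paths,
 * mirroring i915_drm_suspend()/i915_drm_resume() in i915_drv.c.  Only the
 * MST-related calls are shown; the rest of the suspend/resume work is
 * omitted, and the wrapper name is hypothetical.
 */
static void i915_dp_mst_pm_sketch(struct drm_device *dev, bool suspend)
{
	if (suspend) {
		/* Quiesce MST topologies before the hardware goes down. */
		intel_dp_mst_suspend(dev);
	} else {
		/* Re-enable MST topologies once the hardware is back up. */
		intel_dp_mst_resume(dev);
	}
}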