/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * The table below only lists the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional divider for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
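
/*
 * A quick sanity check of the fixed-point encoding above (plain arithmetic,
 * not taken from any spec): for the 162000 entry, (32 << 22) | 1677722
 * evaluates to 0x819999a, which matches the .m2 value in the table.
 */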

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
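
/*
 * Illustration (derived from the logic above, not from any spec table): a
 * DDI port A strapped without DDI_A_4_LANES caps source_max at 2, so even
 * if the sink's DPCD advertises 4 lanes the link uses min(2, 4) == 2 lanes.
 */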

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
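
/*
 * Worked example tying the two helpers together (plain arithmetic based on
 * the comment above, not an additional requirement): one lane at 270000
 * gives intel_dp_max_data_rate(270000, 1) == 216000, while 1680x1050R at
 * 18bpp needs intel_dp_link_required(119000, 18) == 214200, so that mode
 * just fits on the link.
 */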

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
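
/*
 * Example (illustrative only): intel_dp_pack_aux((const uint8_t []){ 0x12,
 * 0x34, 0x56, 0x78 }, 4) returns 0x12345678 - the first source byte lands
 * in the most significant byte of the packed register value.
 */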

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and we want the AUX
	 * clock to run at 2MHz. So, take the hrawclk value and divide by 2 and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
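
/*
 * Rough sanity check of the divider above (derived from intel_hrawclk(),
 * not from documentation): a 200 MHz hrawclk (800 MHz FSB) yields a divider
 * of 100, which lines up with the 2 MHz AUX clock target mentioned above.
 */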

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
		 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
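
/*
 * Illustration of the "(bw >> 3) + 1" trick above (derived from the DPCD
 * encoding, where DP_LINK_BW_2_7 is 0x0a and DP_LINK_BW_5_4 is 0x14):
 * (0x0a >> 3) + 1 == 2 covers the first two default_rates entries
 * (162000, 270000), while (0x14 >> 3) + 1 == 3 covers all three.
 */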
1238
Daniel Vetter0e503382014-07-04 11:26:04 -03001239static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001240intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001241 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001242{
1243 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001244 const struct dp_link_dpll *divisor = NULL;
1245 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001246
1247 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001248 divisor = gen4_dpll;
1249 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001250 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001251 divisor = pch_dpll;
1252 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001253 } else if (IS_CHERRYVIEW(dev)) {
1254 divisor = chv_dpll;
1255 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001256 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001257 divisor = vlv_dpll;
1258 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001259 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001260
1261 if (divisor && count) {
1262 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001263 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001264 pipe_config->dpll = divisor[i].dpll;
1265 pipe_config->clock_set = true;
1266 break;
1267 }
1268 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001269 }
1270}
1271
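/*
 * Merge two ascending rate arrays, keeping only the values present in
 * both. The result is written to common_rates, capped at
 * DP_MAX_SUPPORTED_RATES entries, and its length is returned.
 */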
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001272static int intersect_rates(const int *source_rates, int source_len,
1273 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001274 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301275{
1276 int i = 0, j = 0, k = 0;
1277
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301278 while (i < source_len && j < sink_len) {
1279 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001280 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1281 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001282 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301283 ++k;
1284 ++i;
1285 ++j;
1286 } else if (source_rates[i] < sink_rates[j]) {
1287 ++i;
1288 } else {
1289 ++j;
1290 }
1291 }
1292 return k;
1293}
1294
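/* Intersection of the source and sink rate tables for this connector. */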
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001295static int intel_dp_common_rates(struct intel_dp *intel_dp,
1296 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001297{
1298 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1299 const int *source_rates, *sink_rates;
1300 int source_len, sink_len;
1301
1302 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1303 source_len = intel_dp_source_rates(dev, &source_rates);
1304
1305 return intersect_rates(source_rates, source_len,
1306 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001307 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001308}
1309
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001310static void snprintf_int_array(char *str, size_t len,
1311 const int *array, int nelem)
1312{
1313 int i;
1314
1315 str[0] = '\0';
1316
1317 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001318 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001319 if (r >= len)
1320 return;
1321 str += r;
1322 len -= r;
1323 }
1324}
1325
1326static void intel_dp_print_rates(struct intel_dp *intel_dp)
1327{
1328 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1329 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001330 int source_len, sink_len, common_len;
1331 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001332 char str[128]; /* FIXME: too big for stack? */
1333
1334 if ((drm_debug & DRM_UT_KMS) == 0)
1335 return;
1336
1337 source_len = intel_dp_source_rates(dev, &source_rates);
1338 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1339 DRM_DEBUG_KMS("source rates: %s\n", str);
1340
1341 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1342 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1343 DRM_DEBUG_KMS("sink rates: %s\n", str);
1344
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001345 common_len = intel_dp_common_rates(intel_dp, common_rates);
1346 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1347 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001348}
1349
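/*
 * Index of @find in @rates. With find == 0 on a zero-initialized array
 * this returns the number of populated entries, which is how
 * intel_dp_max_link_rate() locates the last (highest) rate.
 */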
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001350static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301351{
1352 int i = 0;
1353
1354 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1355 if (find == rates[i])
1356 break;
1357
1358 return i;
1359}
1360
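/* Highest link rate, in kHz, supported by both source and sink. */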
Ville Syrjälä50fec212015-03-12 17:10:34 +02001361int
1362intel_dp_max_link_rate(struct intel_dp *intel_dp)
1363{
1364 int rates[DP_MAX_SUPPORTED_RATES] = {};
1365 int len;
1366
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001367 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001368 if (WARN_ON(len <= 0))
1369 return 162000;
1370
1371 return rates[rate_to_index(0, rates) - 1];
1372}
1373
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001374int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1375{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001376 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001377}
1378
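/*
 * Translate a port clock into what link training must write: sinks with
 * a DPCD rate table get a rate-select index (link_bw stays 0), legacy
 * sinks get a DP_LINK_BW_* code (rate_select stays 0). For example, a
 * hypothetical sink table of { 162000, 270000, 324000 } with a 270000 kHz
 * port clock would yield rate_select 1.
 */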
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001379static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1380 uint8_t *link_bw, uint8_t *rate_select)
1381{
1382 if (intel_dp->num_sink_rates) {
1383 *link_bw = 0;
1384 *rate_select =
1385 intel_dp_rate_select(intel_dp, port_clock);
1386 } else {
1387 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1388 *rate_select = 0;
1389 }
1390}
1391
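/*
 * Choose the link configuration for a mode: walk bpp downwards from the
 * pipe bpp in 6 bpp steps and, for each bpp, try link clocks from lowest
 * to highest and lane counts from fewest to most until the required data
 * rate fits. eDP panels are pinned to their maximum clock and lane count,
 * which is the configuration they are designed for.
 */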
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001392bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001393intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001394 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001395{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001396 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001397 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001398 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001400 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001401 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001402 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001403 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001404 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001405 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001406	/* Conveniently, the link BW constants become indices with a shift... */
Jani Nikula56071a22014-05-06 14:56:52 +03001407 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301408 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001409 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001410 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001411 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1412 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001413 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301414
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001415 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301416
1417 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001418 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301419
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001420 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001421
Imre Deakbc7d38a2013-05-16 14:40:36 +03001422 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001423 pipe_config->has_pch_encoder = true;
1424
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001425 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001426 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001427 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001428
Jani Nikuladd06f902012-10-19 14:51:50 +03001429 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1430 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1431 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001432
1433 if (INTEL_INFO(dev)->gen >= 9) {
1434 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001435 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001436 if (ret)
1437 return ret;
1438 }
1439
Jesse Barnes2dd24552013-04-25 12:55:01 -07001440 if (!HAS_PCH_SPLIT(dev))
1441 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1442 intel_connector->panel.fitting_mode);
1443 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001444 intel_pch_panel_fitting(intel_crtc, pipe_config,
1445 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001446 }
1447
Daniel Vettercb1793c2012-06-04 18:39:21 +02001448 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001449 return false;
1450
Daniel Vetter083f9562012-04-20 20:23:49 +02001451 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301452 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001453 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001454 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001455
Daniel Vetter36008362013-03-27 00:44:59 +01001456 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1457 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001458 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001459 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301460
1461		/* Get bpp from VBT only for panels that don't have bpp in the EDID */
1462 if (intel_connector->base.display_info.bpc == 0 &&
1463 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001464 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1465 dev_priv->vbt.edp_bpp);
1466 bpp = dev_priv->vbt.edp_bpp;
1467 }
1468
Jani Nikula344c5bb2014-09-09 11:25:13 +03001469 /*
1470 * Use the maximum clock and number of lanes the eDP panel
1471	 * advertises being capable of. The panels are generally
1472 * designed to support only a single clock and lane
1473 * configuration, and typically these values correspond to the
1474 * native resolution of the panel.
1475 */
1476 min_lane_count = max_lane_count;
1477 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001478 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001479
Daniel Vetter36008362013-03-27 00:44:59 +01001480 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001481 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1482 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001483
Dave Airliec6930992014-07-14 11:04:39 +10001484 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301485 for (lane_count = min_lane_count;
1486 lane_count <= max_lane_count;
1487 lane_count <<= 1) {
1488
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001489 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001490 link_avail = intel_dp_max_data_rate(link_clock,
1491 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001492
Daniel Vetter36008362013-03-27 00:44:59 +01001493 if (mode_rate <= link_avail) {
1494 goto found;
1495 }
1496 }
1497 }
1498 }
1499
1500 return false;
1501
1502found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001503 if (intel_dp->color_range_auto) {
1504 /*
1505 * See:
1506 * CEA-861-E - 5.1 Default Encoding Parameters
1507 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1508 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001509 pipe_config->limited_color_range =
1510 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1511 } else {
1512 pipe_config->limited_color_range =
1513 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001514 }
1515
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001516 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301517
Daniel Vetter657445f2013-05-04 10:09:18 +02001518 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001519 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001520
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001521 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1522 &link_bw, &rate_select);
1523
1524 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1525 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001526 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001527 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1528 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001529
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001530 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001531 adjusted_mode->crtc_clock,
1532 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001533 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001534
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301535 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301536 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001537 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301538 intel_link_compute_m_n(bpp, lane_count,
1539 intel_connector->panel.downclock_mode->clock,
1540 pipe_config->port_clock,
1541 &pipe_config->dp_m2_n2);
1542 }
1543
Damien Lespiau5416d872014-11-14 17:24:33 +00001544 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001545 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301546 else if (IS_BROXTON(dev))
1547 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001548 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001549 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001550 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001551 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001552
Daniel Vetter36008362013-03-27 00:44:59 +01001553 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001554}
1555
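/*
 * Select the CPU eDP (DP_A) PLL frequency for the chosen port clock:
 * 162000 kHz uses the 160MHz setting (possibly needing the ILK DevA
 * workaround), everything else the 270MHz setting.
 */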
Daniel Vetter7c62a162013-06-01 17:16:20 +02001556static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001557{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001558 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1559 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1560 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 u32 dpa_ctl;
1563
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001564 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1565 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001566 dpa_ctl = I915_READ(DP_A);
1567 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1568
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001569 if (crtc->config->port_clock == 162000) {
Daniel Vetter1ce17032012-11-29 15:59:32 +01001570		/* For a long time we've carried around an ILK-DevA w/a for the
1571 * 160MHz clock. If we're really unlucky, it's still required.
1572 */
1573 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
Daniel Vetterea9b6002012-11-29 15:59:31 +01001574 dpa_ctl |= DP_PLL_FREQ_160MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001575 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001576 } else {
1577 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001578 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001579 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001580
Daniel Vetterea9b6002012-11-29 15:59:31 +01001581 I915_WRITE(DP_A, dpa_ctl);
1582
1583 POSTING_READ(DP_A);
1584 udelay(500);
1585}
1586
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001587void intel_dp_set_link_params(struct intel_dp *intel_dp,
1588 const struct intel_crtc_state *pipe_config)
1589{
1590 intel_dp->link_rate = pipe_config->port_clock;
1591 intel_dp->lane_count = pipe_config->lane_count;
1592}
1593
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001594static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001595{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001596 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001597 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001598 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001599 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001600 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001601 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001602
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001603 intel_dp_set_link_params(intel_dp, crtc->config);
1604
Keith Packard417e8222011-11-01 19:54:11 -07001605 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001606 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001607 *
1608 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001609 * SNB CPU
1610 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001611 * CPT PCH
1612 *
1613 * IBX PCH and CPU are the same for almost everything,
1614 * except that the CPU DP PLL is configured in this
1615 * register
1616 *
1617 * CPT PCH is quite different, having many bits moved
1618 * to the TRANS_DP_CTL register instead. That
1619 * configuration happens (oddly) in ironlake_pch_enable
1620 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001621
Keith Packard417e8222011-11-01 19:54:11 -07001622 /* Preserve the BIOS-computed detected bit. This is
1623 * supposed to be read-only.
1624 */
1625 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001626
Keith Packard417e8222011-11-01 19:54:11 -07001627 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001628 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001629 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001630
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001631 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001632 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001633
Keith Packard417e8222011-11-01 19:54:11 -07001634 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001635
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001636 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001637 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1638 intel_dp->DP |= DP_SYNC_HS_HIGH;
1639 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1640 intel_dp->DP |= DP_SYNC_VS_HIGH;
1641 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1642
Jani Nikula6aba5b62013-10-04 15:08:10 +03001643 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001644 intel_dp->DP |= DP_ENHANCED_FRAMING;
1645
Daniel Vetter7c62a162013-06-01 17:16:20 +02001646 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001647 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001648 u32 trans_dp;
1649
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001650 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001651
1652 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1653 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1654 trans_dp |= TRANS_DP_ENH_FRAMING;
1655 else
1656 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1657 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001658 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001659 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1660 crtc->config->limited_color_range)
1661 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001662
1663 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1664 intel_dp->DP |= DP_SYNC_HS_HIGH;
1665 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1666 intel_dp->DP |= DP_SYNC_VS_HIGH;
1667 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1668
Jani Nikula6aba5b62013-10-04 15:08:10 +03001669 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001670 intel_dp->DP |= DP_ENHANCED_FRAMING;
1671
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001672 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001673 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001674 else if (crtc->pipe == PIPE_B)
1675 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001676 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001677}
1678
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001679#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1680#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001681
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001682#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1683#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001684
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001685#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1686#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001687
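/*
 * Poll the panel power sequencer status register until the masked bits
 * match the expected value, logging an error after a 5 second timeout.
 */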
Daniel Vetter4be73782014-01-17 14:39:48 +01001688static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001689 u32 mask,
1690 u32 value)
1691{
Paulo Zanoni30add222012-10-26 19:05:45 -02001692 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001693 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001694 u32 pp_stat_reg, pp_ctrl_reg;
1695
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001696 lockdep_assert_held(&dev_priv->pps_mutex);
1697
Jani Nikulabf13e812013-09-06 07:40:05 +03001698 pp_stat_reg = _pp_stat_reg(intel_dp);
1699 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001700
1701 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001702 mask, value,
1703 I915_READ(pp_stat_reg),
1704 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001705
Jesse Barnes453c5422013-03-28 09:55:41 -07001706 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001707 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001708 I915_READ(pp_stat_reg),
1709 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001710 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001711
1712 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001713}
1714
Daniel Vetter4be73782014-01-17 14:39:48 +01001715static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001716{
1717 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001718 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001719}
1720
Daniel Vetter4be73782014-01-17 14:39:48 +01001721static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001722{
Keith Packardbd943152011-09-18 23:09:52 -07001723 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001724 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001725}
Keith Packardbd943152011-09-18 23:09:52 -07001726
Daniel Vetter4be73782014-01-17 14:39:48 +01001727static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001728{
1729 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001730
1731	/* When we disable the VDD override bit last, we have to do the manual
1732 * wait. */
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1734 intel_dp->panel_power_cycle_delay);
1735
Daniel Vetter4be73782014-01-17 14:39:48 +01001736 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001737}
Keith Packardbd943152011-09-18 23:09:52 -07001738
Daniel Vetter4be73782014-01-17 14:39:48 +01001739static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001740{
1741 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1742 intel_dp->backlight_on_delay);
1743}
1744
Daniel Vetter4be73782014-01-17 14:39:48 +01001745static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001746{
1747 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1748 intel_dp->backlight_off_delay);
1749}
Keith Packard99ea7122011-11-01 19:57:50 -07001750
Keith Packard832dd3c2011-11-01 19:34:06 -07001751/* Read the current pp_control value, unlocking the register if it
1752 * is locked
1753 */
1754
Jesse Barnes453c5422013-03-28 09:55:41 -07001755static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001756{
Jesse Barnes453c5422013-03-28 09:55:41 -07001757 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1758 struct drm_i915_private *dev_priv = dev->dev_private;
1759 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001760
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001761 lockdep_assert_held(&dev_priv->pps_mutex);
1762
Jani Nikulabf13e812013-09-06 07:40:05 +03001763 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301764 if (!IS_BROXTON(dev)) {
1765 control &= ~PANEL_UNLOCK_MASK;
1766 control |= PANEL_UNLOCK_REGS;
1767 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001768 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001769}
1770
Ville Syrjälä951468f2014-09-04 14:55:31 +03001771/*
1772 * Must be paired with edp_panel_vdd_off().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1775 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001776static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001777{
Paulo Zanoni30add222012-10-26 19:05:45 -02001778 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001779 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1780 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001781 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001782 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001783 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001784 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001785 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001786
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001787 lockdep_assert_held(&dev_priv->pps_mutex);
1788
Keith Packard97af61f572011-09-28 16:23:51 -07001789 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001790 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001791
Egbert Eich2c623c12014-11-25 12:54:57 +01001792 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001793 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001794
Daniel Vetter4be73782014-01-17 14:39:48 +01001795 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001796 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001797
Imre Deak4e6e1a52014-03-27 17:45:11 +02001798 power_domain = intel_display_port_power_domain(intel_encoder);
1799 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001800
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001801 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1802 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001803
Daniel Vetter4be73782014-01-17 14:39:48 +01001804 if (!edp_have_panel_power(intel_dp))
1805 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001806
Jesse Barnes453c5422013-03-28 09:55:41 -07001807 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001808 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001809
Jani Nikulabf13e812013-09-06 07:40:05 +03001810 pp_stat_reg = _pp_stat_reg(intel_dp);
1811 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001812
1813 I915_WRITE(pp_ctrl_reg, pp);
1814 POSTING_READ(pp_ctrl_reg);
1815 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001817 /*
1818 * If the panel wasn't on, delay before accessing aux channel
1819 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001820 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001821 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1822 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001823 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001824 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001825
1826 return need_to_disable;
1827}
1828
Ville Syrjälä951468f2014-09-04 14:55:31 +03001829/*
1830 * Must be paired with intel_edp_panel_vdd_off() or
1831 * intel_edp_panel_off().
1832 * Nested calls to these functions are not allowed since
1833 * we drop the lock. Caller must use some higher level
1834 * locking to prevent nested calls from other threads.
1835 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001836void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001837{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001838 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001839
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001840 if (!is_edp(intel_dp))
1841 return;
1842
Ville Syrjälä773538e82014-09-04 14:54:56 +03001843 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001844 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001845 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001846
Rob Clarke2c719b2014-12-15 13:56:32 -05001847 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001848 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001849}
1850
Daniel Vetter4be73782014-01-17 14:39:48 +01001851static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001852{
Paulo Zanoni30add222012-10-26 19:05:45 -02001853 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001854 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001855 struct intel_digital_port *intel_dig_port =
1856 dp_to_dig_port(intel_dp);
1857 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1858 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001859 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001860 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001861
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001862 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001863
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001864 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001865
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001866 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001867 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001868
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001869 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1870 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001871
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001872 pp = ironlake_get_pp_control(intel_dp);
1873 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001874
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001875 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1876 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001877
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001880
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001881 /* Make sure sequencer is idle before allowing subsequent activity */
1882 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1883 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001884
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001885 if ((pp & POWER_TARGET_ON) == 0)
1886 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001887
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001888 power_domain = intel_display_port_power_domain(intel_encoder);
1889 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001890}
1891
Daniel Vetter4be73782014-01-17 14:39:48 +01001892static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001893{
1894 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1895 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001896
Ville Syrjälä773538e82014-09-04 14:54:56 +03001897 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001898 if (!intel_dp->want_panel_vdd)
1899 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001900 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001901}
1902
Imre Deakaba86892014-07-30 15:57:31 +03001903static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1904{
1905 unsigned long delay;
1906
1907 /*
1908 * Queue the timer to fire a long time from now (relative to the power
1909 * down delay) to keep the panel power up across a sequence of
1910 * operations.
1911 */
1912 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1913 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1914}
1915
Ville Syrjälä951468f2014-09-04 14:55:31 +03001916/*
1917 * Must be paired with edp_panel_vdd_on().
1918 * Must hold pps_mutex around the whole on/off sequence.
1919 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1920 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001921static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001922{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001923 struct drm_i915_private *dev_priv =
1924 intel_dp_to_dev(intel_dp)->dev_private;
1925
1926 lockdep_assert_held(&dev_priv->pps_mutex);
1927
Keith Packard97af61f572011-09-28 16:23:51 -07001928 if (!is_edp(intel_dp))
1929 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001930
Rob Clarke2c719b2014-12-15 13:56:32 -05001931 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001932 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001933
Keith Packardbd943152011-09-18 23:09:52 -07001934 intel_dp->want_panel_vdd = false;
1935
Imre Deakaba86892014-07-30 15:57:31 +03001936 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001937 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001938 else
1939 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001940}
1941
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001942static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001943{
Paulo Zanoni30add222012-10-26 19:05:45 -02001944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001945 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001946 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001947 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001948
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001949 lockdep_assert_held(&dev_priv->pps_mutex);
1950
Keith Packard97af61f572011-09-28 16:23:51 -07001951 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001952 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001953
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001954 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1955 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001956
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001957 if (WARN(edp_have_panel_power(intel_dp),
1958 "eDP port %c panel power already on\n",
1959 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001960 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001961
Daniel Vetter4be73782014-01-17 14:39:48 +01001962 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001963
Jani Nikulabf13e812013-09-06 07:40:05 +03001964 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001965 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001966 if (IS_GEN5(dev)) {
1967 /* ILK workaround: disable reset around power sequence */
1968 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001971 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001972
Keith Packard1c0ae802011-09-19 13:59:29 -07001973 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001974 if (!IS_GEN5(dev))
1975 pp |= PANEL_POWER_RESET;
1976
Jesse Barnes453c5422013-03-28 09:55:41 -07001977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001979
Daniel Vetter4be73782014-01-17 14:39:48 +01001980 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001981 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001982
Keith Packard05ce1a42011-09-29 16:33:01 -07001983 if (IS_GEN5(dev)) {
1984 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001985 I915_WRITE(pp_ctrl_reg, pp);
1986 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001987 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001988}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001989
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001990void intel_edp_panel_on(struct intel_dp *intel_dp)
1991{
1992 if (!is_edp(intel_dp))
1993 return;
1994
1995 pps_lock(intel_dp);
1996 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001997 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001998}
1999
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002000
2001static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07002002{
Imre Deak4e6e1a52014-03-27 17:45:11 +02002003 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2004 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02002005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002006 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02002007 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07002008 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002009 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07002010
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002011 lockdep_assert_held(&dev_priv->pps_mutex);
2012
Keith Packard97af61f572011-09-28 16:23:51 -07002013 if (!is_edp(intel_dp))
2014 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002015
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002016 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2017 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002018
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002019 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2020 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002021
Jesse Barnes453c5422013-03-28 09:55:41 -07002022 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002023 /* We need to switch off panel power _and_ force vdd, for otherwise some
2024 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002025 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2026 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002027
Jani Nikulabf13e812013-09-06 07:40:05 +03002028 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002029
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002030 intel_dp->want_panel_vdd = false;
2031
Jesse Barnes453c5422013-03-28 09:55:41 -07002032 I915_WRITE(pp_ctrl_reg, pp);
2033 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002034
Paulo Zanonidce56b32013-12-19 14:29:40 -02002035 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002036 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002037
2038 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002039 power_domain = intel_display_port_power_domain(intel_encoder);
2040 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002041}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002042
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002043void intel_edp_panel_off(struct intel_dp *intel_dp)
2044{
2045 if (!is_edp(intel_dp))
2046 return;
2047
2048 pps_lock(intel_dp);
2049 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002050 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002051}
2052
Jani Nikula1250d102014-08-12 17:11:39 +03002053/* Enable backlight in the panel power control. */
2054static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002055{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002056 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2057 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002058 struct drm_i915_private *dev_priv = dev->dev_private;
2059 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002060 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002061
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002062 /*
2063 * If we enable the backlight right away following a panel power
2064 * on, we may see slight flicker as the panel syncs with the eDP
2065 * link. So delay a bit to make sure the image is solid before
2066 * allowing it to appear.
2067 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002068 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002069
Ville Syrjälä773538e82014-09-04 14:54:56 +03002070 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002071
Jesse Barnes453c5422013-03-28 09:55:41 -07002072 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002073 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002074
Jani Nikulabf13e812013-09-06 07:40:05 +03002075 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002076
2077 I915_WRITE(pp_ctrl_reg, pp);
2078 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002079
Ville Syrjälä773538e82014-09-04 14:54:56 +03002080 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002081}
2082
Jani Nikula1250d102014-08-12 17:11:39 +03002083/* Enable backlight PWM and backlight PP control. */
2084void intel_edp_backlight_on(struct intel_dp *intel_dp)
2085{
2086 if (!is_edp(intel_dp))
2087 return;
2088
2089 DRM_DEBUG_KMS("\n");
2090
2091 intel_panel_enable_backlight(intel_dp->attached_connector);
2092 _intel_edp_backlight_on(intel_dp);
2093}
2094
2095/* Disable backlight in the panel power control. */
2096static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002097{
Paulo Zanoni30add222012-10-26 19:05:45 -02002098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002101 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002102
Keith Packardf01eca22011-09-28 16:48:10 -07002103 if (!is_edp(intel_dp))
2104 return;
2105
Ville Syrjälä773538e82014-09-04 14:54:56 +03002106 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002107
Jesse Barnes453c5422013-03-28 09:55:41 -07002108 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002109 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002110
Jani Nikulabf13e812013-09-06 07:40:05 +03002111 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002112
2113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002115
Ville Syrjälä773538e82014-09-04 14:54:56 +03002116 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002117
Paulo Zanonidce56b32013-12-19 14:29:40 -02002118 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002119 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002120}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002121
Jani Nikula1250d102014-08-12 17:11:39 +03002122/* Disable backlight PP control and backlight PWM. */
2123void intel_edp_backlight_off(struct intel_dp *intel_dp)
2124{
2125 if (!is_edp(intel_dp))
2126 return;
2127
2128 DRM_DEBUG_KMS("\n");
2129
2130 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002131 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002132}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002133
Jani Nikula73580fb72014-08-12 17:11:41 +03002134/*
2135 * Hook for controlling the panel power control backlight through the bl_power
2136 * sysfs attribute. Take care to handle multiple calls.
2137 */
2138static void intel_edp_backlight_power(struct intel_connector *connector,
2139 bool enable)
2140{
2141 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002142 bool is_enabled;
2143
Ville Syrjälä773538e82014-09-04 14:54:56 +03002144 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002145 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002146 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002147
2148 if (is_enabled == enable)
2149 return;
2150
Jani Nikula23ba9372014-08-27 14:08:43 +03002151 DRM_DEBUG_KMS("panel power control backlight %s\n",
2152 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002153
2154 if (enable)
2155 _intel_edp_backlight_on(intel_dp);
2156 else
2157 _intel_edp_backlight_off(intel_dp);
2158}
2159
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002160static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002161{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2163 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2164 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 dpa_ctl;
2167
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002168 assert_pipe_disabled(dev_priv,
2169 to_intel_crtc(crtc)->pipe);
2170
Jesse Barnesd240f202010-08-13 15:43:26 -07002171 DRM_DEBUG_KMS("\n");
2172 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002173 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2174 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2175
2176 /* We don't adjust intel_dp->DP while tearing down the link, to
2177 * facilitate link retraining (e.g. after hotplug). Hence clear all
2178 * enable bits here to ensure that we don't enable too much. */
2179 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2180 intel_dp->DP |= DP_PLL_ENABLE;
2181 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002182 POSTING_READ(DP_A);
2183 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002184}
2185
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002186static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002187{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002188 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2189 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2190 struct drm_device *dev = crtc->dev;
Jesse Barnesd240f202010-08-13 15:43:26 -07002191 struct drm_i915_private *dev_priv = dev->dev_private;
2192 u32 dpa_ctl;
2193
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002194 assert_pipe_disabled(dev_priv,
2195 to_intel_crtc(crtc)->pipe);
2196
Jesse Barnesd240f202010-08-13 15:43:26 -07002197 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002198 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2199 "dp pll off, should be on\n");
2200 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2201
2202 /* We can't rely on the value tracked for the DP register in
2203 * intel_dp->DP because link_down must not change that (otherwise link
2204	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002205 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002206 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002207 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002208 udelay(200);
2209}
2210
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002211/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002212void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002213{
2214 int ret, i;
2215
2216 /* Should have a valid DPCD by this point */
2217 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2218 return;
2219
2220 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002223 } else {
2224 /*
2225		 * When turning on, retry a few times with a 1 ms sleep between
2226		 * attempts to give the sink time to wake up.
2227 */
2228 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002229 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2230 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002231 if (ret == 1)
2232 break;
2233 msleep(1);
2234 }
2235 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002236
2237 if (ret != 1)
2238 DRM_DEBUG_KMS("failed to %s sink power state\n",
2239 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002240}
2241
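/*
 * Read back from the hardware whether the port is enabled and, if so,
 * which pipe feeds it. Returns false when the port's power domain is off
 * or the port is disabled. On CPT PCH ports the pipe is found by scanning
 * the TRANS_DP_CTL registers, otherwise it is decoded from pipe select
 * bits in the DP port register itself.
 */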
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002242static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2243 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002244{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002245 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002246 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002247 struct drm_device *dev = encoder->base.dev;
2248 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002249 enum intel_display_power_domain power_domain;
2250 u32 tmp;
2251
2252 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002253 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002254 return false;
2255
2256 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002257
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002258 if (!(tmp & DP_PORT_EN))
2259 return false;
2260
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002261 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002262 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002263 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002264 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002265
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002266 for_each_pipe(dev_priv, p) {
2267 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2268 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2269 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002270 return true;
2271 }
2272 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002273
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002274 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2275 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002276 } else if (IS_CHERRYVIEW(dev)) {
2277 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2278 } else {
2279 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002280 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002281
2282 return true;
2283}
2284
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002285static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002286 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002287{
2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002289 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002290 struct drm_device *dev = encoder->base.dev;
2291 struct drm_i915_private *dev_priv = dev->dev_private;
2292 enum port port = dp_to_dig_port(intel_dp)->port;
2293 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002294 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002295
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002296 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002297
2298 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002299
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002300 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002301 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2302
2303 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002304 flags |= DRM_MODE_FLAG_PHSYNC;
2305 else
2306 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002307
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002308 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002309 flags |= DRM_MODE_FLAG_PVSYNC;
2310 else
2311 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002312 } else {
2313 if (tmp & DP_SYNC_HS_HIGH)
2314 flags |= DRM_MODE_FLAG_PHSYNC;
2315 else
2316 flags |= DRM_MODE_FLAG_NHSYNC;
2317
2318 if (tmp & DP_SYNC_VS_HIGH)
2319 flags |= DRM_MODE_FLAG_PVSYNC;
2320 else
2321 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002322 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002323
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002324 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002325
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002326 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2327 tmp & DP_COLOR_RANGE_16_235)
2328 pipe_config->limited_color_range = true;
2329
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002330 pipe_config->has_dp_encoder = true;
2331
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002332 pipe_config->lane_count =
2333 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2334
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002335 intel_dp_get_m_n(crtc, pipe_config);
2336
Ville Syrjälä18442d02013-09-13 16:00:08 +03002337 if (port == PORT_A) {
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002338 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2339 pipe_config->port_clock = 162000;
2340 else
2341 pipe_config->port_clock = 270000;
2342 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002343
2344 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2345 &pipe_config->dp_m_n);
2346
2347 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2348 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2349
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002350 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002351
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002352 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2353 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2354 /*
2355 * This is a big fat ugly hack.
2356 *
2357 * Some machines in UEFI boot mode provide us with a VBT that has 18
2358 * bpp and 1.62 GHz link bandwidth for eDP, which, for reasons
2359 * unknown, we fail to light up. Yet the same BIOS boots up with
2360 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2361 * max, not what it tells us to use.
2362 *
2363 * Note: This will still be broken if the eDP panel is not lit
2364 * up by the BIOS, and thus we can't get the mode at module
2365 * load.
2366 */
2367 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2368 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2369 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2370 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002371}
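/*
 * A note added for clarity, not part of the original file: the dotclock
 * computed above is recovered from the read-back link rate and data M/N
 * values, roughly
 *
 *	crtc_clock = port_clock * link_m / link_n
 *
 * which is what intel_dotclock_calculate() evaluates. The filled-in
 * pipe_config is later compared against the software-computed state by the
 * modeset state checker, so this readout has to mirror what the compute
 * stage programmed.
 */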
2372
Daniel Vettere8cb4552012-07-01 13:05:48 +02002373static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002374{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002375 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002376 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002377 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2378
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002379 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002380 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002381
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002382 if (HAS_PSR(dev) && !HAS_DDI(dev))
2383 intel_psr_disable(intel_dp);
2384
Daniel Vetter6cb49832012-05-20 17:14:50 +02002385 /* Make sure the panel is off before trying to change the mode. But also
2386 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002387 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002388 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002389 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002390 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002391
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002392 /* disable the port before the pipe on g4x */
2393 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002394 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002395}
2396
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002397static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002398{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002400 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002401
Ville Syrjälä49277c32014-03-31 18:21:26 +03002402 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002403 if (port == PORT_A)
2404 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002405}
2406
2407static void vlv_post_disable_dp(struct intel_encoder *encoder)
2408{
2409 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2410
2411 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002412}
2413
Ville Syrjälä580d3812014-04-09 13:29:00 +03002414static void chv_post_disable_dp(struct intel_encoder *encoder)
2415{
2416 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2417 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2418 struct drm_device *dev = encoder->base.dev;
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420 struct intel_crtc *intel_crtc =
2421 to_intel_crtc(encoder->base.crtc);
2422 enum dpio_channel ch = vlv_dport_to_channel(dport);
2423 enum pipe pipe = intel_crtc->pipe;
2424 u32 val;
2425
2426 intel_dp_link_down(intel_dp);
2427
Ville Syrjäläa5805162015-05-26 20:42:30 +03002428 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002429
2430 /* Propagate soft reset to data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002432 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002434
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2436 val |= CHV_PCS_REQ_SOFTRESET_EN;
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2438
2439 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä580d3812014-04-09 13:29:00 +03002440 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002441 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2442
2443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2444 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002446
Ville Syrjäläa5805162015-05-26 20:42:30 +03002447 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002448}
2449
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002450static void
2451_intel_dp_set_link_train(struct intel_dp *intel_dp,
2452 uint32_t *DP,
2453 uint8_t dp_train_pat)
2454{
2455 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2456 struct drm_device *dev = intel_dig_port->base.base.dev;
2457 struct drm_i915_private *dev_priv = dev->dev_private;
2458 enum port port = intel_dig_port->port;
2459
2460 if (HAS_DDI(dev)) {
2461 uint32_t temp = I915_READ(DP_TP_CTL(port));
2462
2463 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2464 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2465 else
2466 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2467
2468 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2469 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2470 case DP_TRAINING_PATTERN_DISABLE:
2471 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2472
2473 break;
2474 case DP_TRAINING_PATTERN_1:
2475 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2476 break;
2477 case DP_TRAINING_PATTERN_2:
2478 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2479 break;
2480 case DP_TRAINING_PATTERN_3:
2481 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2482 break;
2483 }
2484 I915_WRITE(DP_TP_CTL(port), temp);
2485
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002486 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2487 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002488 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2489
2490 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2491 case DP_TRAINING_PATTERN_DISABLE:
2492 *DP |= DP_LINK_TRAIN_OFF_CPT;
2493 break;
2494 case DP_TRAINING_PATTERN_1:
2495 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2496 break;
2497 case DP_TRAINING_PATTERN_2:
2498 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2499 break;
2500 case DP_TRAINING_PATTERN_3:
2501 DRM_ERROR("DP training pattern 3 not supported\n");
2502 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2503 break;
2504 }
2505
2506 } else {
2507 if (IS_CHERRYVIEW(dev))
2508 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2509 else
2510 *DP &= ~DP_LINK_TRAIN_MASK;
2511
2512 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2513 case DP_TRAINING_PATTERN_DISABLE:
2514 *DP |= DP_LINK_TRAIN_OFF;
2515 break;
2516 case DP_TRAINING_PATTERN_1:
2517 *DP |= DP_LINK_TRAIN_PAT_1;
2518 break;
2519 case DP_TRAINING_PATTERN_2:
2520 *DP |= DP_LINK_TRAIN_PAT_2;
2521 break;
2522 case DP_TRAINING_PATTERN_3:
2523 if (IS_CHERRYVIEW(dev)) {
2524 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2525 } else {
2526 DRM_ERROR("DP training pattern 3 not supported\n");
2527 *DP |= DP_LINK_TRAIN_PAT_2;
2528 }
2529 break;
2530 }
2531 }
2532}
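/*
 * An illustrative sketch, not part of the original file: dp_train_pat as
 * consumed above is one of the DP_TRAINING_PATTERN_* values, optionally
 * OR'ed with DP_LINK_SCRAMBLING_DISABLE. Assuming the usual link-training
 * call site, training starts out along the lines of
 *
 *	intel_dp_set_link_train(intel_dp, &DP,
 *				DP_TRAINING_PATTERN_1 |
 *				DP_LINK_SCRAMBLING_DISABLE);
 *
 * where the (non-underscored) wrapper also mirrors the chosen pattern into
 * the sink's DP_TRAINING_PATTERN_SET register over DPCD, while this helper
 * only computes the source-side port/DP_TP_CTL bits.
 */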
2533
2534static void intel_dp_enable_port(struct intel_dp *intel_dp)
2535{
2536 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2537 struct drm_i915_private *dev_priv = dev->dev_private;
2538
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002539 /* enable with pattern 1 (as per spec) */
2540 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2541 DP_TRAINING_PATTERN_1);
2542
2543 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2544 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002545
2546 /*
2547 * Magic for VLV/CHV. We _must_ first set up the register
2548 * without actually enabling the port, and then do another
2549 * write to enable the port. Otherwise link training will
2550 * fail when the power sequencer is freshly used for this port.
2551 */
2552 intel_dp->DP |= DP_PORT_EN;
2553
2554 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2555 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002556}
2557
Daniel Vettere8cb4552012-07-01 13:05:48 +02002558static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002559{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002560 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2561 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002562 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002563 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002564 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002565 unsigned int lane_mask = 0x0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002566
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002567 if (WARN_ON(dp_reg & DP_PORT_EN))
2568 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002569
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002570 pps_lock(intel_dp);
2571
2572 if (IS_VALLEYVIEW(dev))
2573 vlv_init_panel_power_sequencer(intel_dp);
2574
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002575 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002576
2577 edp_panel_vdd_on(intel_dp);
2578 edp_panel_on(intel_dp);
2579 edp_panel_vdd_off(intel_dp, true);
2580
2581 pps_unlock(intel_dp);
2582
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002583 if (IS_VALLEYVIEW(dev))
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002584 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2585 lane_mask);
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002586
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002587 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2588 intel_dp_start_link_train(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002589 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002590 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002591
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002592 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002593 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2594 pipe_name(crtc->pipe));
2595 intel_audio_codec_enable(encoder);
2596 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002597}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002598
Jani Nikulaecff4f32013-09-06 07:38:29 +03002599static void g4x_enable_dp(struct intel_encoder *encoder)
2600{
Jani Nikula828f5c62013-09-05 16:44:45 +03002601 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2602
Jani Nikulaecff4f32013-09-06 07:38:29 +03002603 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002604 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002605}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002606
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002607static void vlv_enable_dp(struct intel_encoder *encoder)
2608{
Jani Nikula828f5c62013-09-05 16:44:45 +03002609 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2610
Daniel Vetter4be73782014-01-17 14:39:48 +01002611 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002612 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002613}
2614
Jani Nikulaecff4f32013-09-06 07:38:29 +03002615static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002616{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002617 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002618 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002619
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002620 intel_dp_prepare(encoder);
2621
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002622 /* Only ilk+ has port A */
2623 if (dport->port == PORT_A) {
2624 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002625 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002626 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002627}
2628
Ville Syrjälä83b84592014-10-16 21:29:51 +03002629static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2630{
2631 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2632 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2633 enum pipe pipe = intel_dp->pps_pipe;
2634 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2635
2636 edp_panel_vdd_off_sync(intel_dp);
2637
2638 /*
2639 * VLV seems to get confused when multiple power sequencers
2640 * have the same port selected (even if only one has power/vdd
2641 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2642 * CHV, on the other hand, doesn't seem to mind having the same port
2643 * selected in multiple power sequencers, but let's always clear the
2644 * port select when logically disconnecting a power sequencer
2645 * from a port.
2646 */
2647 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2648 pipe_name(pipe), port_name(intel_dig_port->port));
2649 I915_WRITE(pp_on_reg, 0);
2650 POSTING_READ(pp_on_reg);
2651
2652 intel_dp->pps_pipe = INVALID_PIPE;
2653}
2654
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002655static void vlv_steal_power_sequencer(struct drm_device *dev,
2656 enum pipe pipe)
2657{
2658 struct drm_i915_private *dev_priv = dev->dev_private;
2659 struct intel_encoder *encoder;
2660
2661 lockdep_assert_held(&dev_priv->pps_mutex);
2662
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002663 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2664 return;
2665
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002666 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2667 base.head) {
2668 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002669 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002670
2671 if (encoder->type != INTEL_OUTPUT_EDP)
2672 continue;
2673
2674 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002675 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002676
2677 if (intel_dp->pps_pipe != pipe)
2678 continue;
2679
2680 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002681 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002682
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002683 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002684 "stealing pipe %c power sequencer from active eDP port %c\n",
2685 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002686
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002687 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002688 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002689 }
2690}
2691
2692static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2693{
2694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2695 struct intel_encoder *encoder = &intel_dig_port->base;
2696 struct drm_device *dev = encoder->base.dev;
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002699
2700 lockdep_assert_held(&dev_priv->pps_mutex);
2701
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002702 if (!is_edp(intel_dp))
2703 return;
2704
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002705 if (intel_dp->pps_pipe == crtc->pipe)
2706 return;
2707
2708 /*
2709 * If another power sequencer was being used on this
2710 * port previously, make sure to turn off vdd there while
2711 * we still have control of it.
2712 */
2713 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002714 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002715
2716 /*
2717 * We may be stealing the power
2718 * sequencer from another port.
2719 */
2720 vlv_steal_power_sequencer(dev, crtc->pipe);
2721
2722 /* now it's all ours */
2723 intel_dp->pps_pipe = crtc->pipe;
2724
2725 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2726 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2727
2728 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002729 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2730 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002731}
2732
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002733static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2734{
2735 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2736 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002737 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002738 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002739 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002740 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002741 int pipe = intel_crtc->pipe;
2742 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002743
Ville Syrjäläa5805162015-05-26 20:42:30 +03002744 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002745
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002747 val = 0;
2748 if (pipe)
2749 val |= (1<<21);
2750 else
2751 val &= ~(1<<21);
2752 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002753 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2755 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002756
Ville Syrjäläa5805162015-05-26 20:42:30 +03002757 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002758
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002759 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002760}
2761
Jani Nikulaecff4f32013-09-06 07:38:29 +03002762static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002763{
2764 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2765 struct drm_device *dev = encoder->base.dev;
2766 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002767 struct intel_crtc *intel_crtc =
2768 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002769 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002770 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002771
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002772 intel_dp_prepare(encoder);
2773
Jesse Barnes89b667f2013-04-18 14:51:36 -07002774 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002775 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002776 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002777 DPIO_PCS_TX_LANE2_RESET |
2778 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002779 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002780 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2781 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2782 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2783 DPIO_PCS_CLK_SOFT_RESET);
2784
2785 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002786 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2787 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2788 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002789 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002790}
2791
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002792static void chv_pre_enable_dp(struct intel_encoder *encoder)
2793{
2794 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2795 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2796 struct drm_device *dev = encoder->base.dev;
2797 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002798 struct intel_crtc *intel_crtc =
2799 to_intel_crtc(encoder->base.crtc);
2800 enum dpio_channel ch = vlv_dport_to_channel(dport);
2801 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002802 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002803 u32 val;
2804
Ville Syrjäläa5805162015-05-26 20:42:30 +03002805 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002806
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002807 /* allow hardware to manage TX FIFO reset source */
2808 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2809 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2810 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2811
2812 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2813 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2814 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2815
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002816 /* Deassert soft data lane reset */
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002818 val |= CHV_PCS_REQ_SOFTRESET_EN;
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002819 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
Ville Syrjäläd2152b22014-04-28 14:15:24 +03002820
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002821 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2822 val |= CHV_PCS_REQ_SOFTRESET_EN;
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002826 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
Ville Syrjälä97fd4d52014-04-09 13:29:02 +03002827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2828
2829 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2830 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2831 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002832
2833 /* Program Tx lane latency optimal setting */
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002834 for (i = 0; i < 4; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002835 /* Set the upar bit */
2836 data = (i == 1) ? 0x0 : 0x1;
2837 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2838 data << DPIO_UPAR_SHIFT);
2839 }
2840
2841 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002842 if (intel_crtc->config->port_clock > 270000)
2843 stagger = 0x18;
2844 else if (intel_crtc->config->port_clock > 135000)
2845 stagger = 0xd;
2846 else if (intel_crtc->config->port_clock > 67500)
2847 stagger = 0x7;
2848 else if (intel_crtc->config->port_clock > 33750)
2849 stagger = 0x4;
2850 else
2851 stagger = 0x2;
2852
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2854 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2856
2857 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2858 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2860
2861 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2862 DPIO_LANESTAGGER_STRAP(stagger) |
2863 DPIO_LANESTAGGER_STRAP_OVRD |
2864 DPIO_TX1_STAGGER_MASK(0x1f) |
2865 DPIO_TX1_STAGGER_MULT(6) |
2866 DPIO_TX2_STAGGER_MULT(0));
2867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(7) |
2873 DPIO_TX2_STAGGER_MULT(5));
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002874
Ville Syrjäläa5805162015-05-26 20:42:30 +03002875 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002876
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002877 intel_enable_dp(encoder);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002878}
2879
Ville Syrjälä9197c882014-04-09 13:29:05 +03002880static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2881{
2882 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2883 struct drm_device *dev = encoder->base.dev;
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 struct intel_crtc *intel_crtc =
2886 to_intel_crtc(encoder->base.crtc);
2887 enum dpio_channel ch = vlv_dport_to_channel(dport);
2888 enum pipe pipe = intel_crtc->pipe;
2889 u32 val;
2890
Ville Syrjälä625695f2014-06-28 02:04:02 +03002891 intel_dp_prepare(encoder);
2892
Ville Syrjäläa5805162015-05-26 20:42:30 +03002893 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002894
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002895 /* program left/right clock distribution */
2896 if (pipe != PIPE_B) {
2897 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2898 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2899 if (ch == DPIO_CH0)
2900 val |= CHV_BUFLEFTENA1_FORCE;
2901 if (ch == DPIO_CH1)
2902 val |= CHV_BUFRIGHTENA1_FORCE;
2903 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2904 } else {
2905 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2906 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2907 if (ch == DPIO_CH0)
2908 val |= CHV_BUFLEFTENA2_FORCE;
2909 if (ch == DPIO_CH1)
2910 val |= CHV_BUFRIGHTENA2_FORCE;
2911 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2912 }
2913
Ville Syrjälä9197c882014-04-09 13:29:05 +03002914 /* program clock channel usage */
2915 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2916 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2917 if (pipe != PIPE_B)
2918 val &= ~CHV_PCS_USEDCLKCHANNEL;
2919 else
2920 val |= CHV_PCS_USEDCLKCHANNEL;
2921 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2922
2923 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2924 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2925 if (pipe != PIPE_B)
2926 val &= ~CHV_PCS_USEDCLKCHANNEL;
2927 else
2928 val |= CHV_PCS_USEDCLKCHANNEL;
2929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2930
2931 /*
2932 * This is a bit weird since generally CL
2933 * matches the pipe, but here we need to
2934 * pick the CL based on the port.
2935 */
2936 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2937 if (pipe != PIPE_B)
2938 val &= ~CHV_CMN_USEDCLKCHANNEL;
2939 else
2940 val |= CHV_CMN_USEDCLKCHANNEL;
2941 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2942
Ville Syrjäläa5805162015-05-26 20:42:30 +03002943 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002944}
2945
Ville Syrjäläd6db9952015-07-08 23:45:49 +03002946static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2947{
2948 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2949 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2950 u32 val;
2951
2952 mutex_lock(&dev_priv->sb_lock);
2953
2954 /* disable left/right clock distribution */
2955 if (pipe != PIPE_B) {
2956 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2957 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2958 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2959 } else {
2960 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2961 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2962 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2963 }
2964
2965 mutex_unlock(&dev_priv->sb_lock);
2966}
2967
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002968/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002969 * Native read with retry for link status and receiver capability reads for
2970 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02002971 *
2972 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2973 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002974 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02002975static ssize_t
2976intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2977 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002978{
Jani Nikula9d1a1032014-03-14 16:51:15 +02002979 ssize_t ret;
2980 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002981
Ville Syrjäläf6a19062014-10-16 20:46:09 +03002982 /*
2983 * Sometimes we just get the same incorrect byte repeated
2984 * over the entire buffer. Doing just one throw-away read
2985 * initially seems to "solve" it.
2986 */
2987 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2988
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002989 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002990 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2991 if (ret == size)
2992 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07002993 msleep(1);
2994 }
2995
Jani Nikula9d1a1032014-03-14 16:51:15 +02002996 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002997}
2998
2999/*
3000 * Fetch AUX CH registers 0x202 - 0x207 which contain
3001 * link status information
3002 */
3003static bool
Keith Packard93f62da2011-11-01 19:45:03 -07003004intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003005{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003006 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3007 DP_LANE0_1_STATUS,
3008 link_status,
3009 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003010}
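/*
 * An illustrative sketch, not part of the original file: the six status
 * bytes fetched above are normally handed to the generic DRM DP helpers to
 * decide whether (re)training is needed, e.g.
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *	bool link_ok;
 *
 *	link_ok = intel_dp_get_link_status(intel_dp, link_status) &&
 *		  drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count) &&
 *		  drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 *
 * drm_dp_clock_recovery_ok() and drm_dp_channel_eq_ok() come from the DRM DP
 * helper library and simply check the per-lane CR/EQ/symbol-lock bits
 * contained in these bytes.
 */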
3011
Paulo Zanoni11002442014-06-13 18:45:41 -03003012/* These are source-specific values. */
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003013static uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003014intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003015{
Paulo Zanoni30add222012-10-26 19:05:45 -02003016 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303017 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003018 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003019
Vandana Kannan93147262014-11-18 15:45:29 +05303020 if (IS_BROXTON(dev))
3021 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3022 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303023 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303024 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003025 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303026 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303027 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003028 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303029 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003030 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303031 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003032 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303033 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003034}
3035
3036static uint8_t
3037intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3038{
Paulo Zanoni30add222012-10-26 19:05:45 -02003039 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003040 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003041
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003042 if (INTEL_INFO(dev)->gen >= 9) {
3043 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3045 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3047 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3049 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003052 default:
3053 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3054 }
3055 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003056 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003064 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303065 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003066 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003067 } else if (IS_VALLEYVIEW(dev)) {
3068 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3070 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3071 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3072 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3074 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003076 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303077 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003078 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003079 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003080 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3082 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3084 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3085 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003086 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303087 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003088 }
3089 } else {
3090 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3094 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3096 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003098 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003100 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003101 }
3102}
3103
Daniel Vetter5829975c2015-04-16 11:36:52 +02003104static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003105{
3106 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3107 struct drm_i915_private *dev_priv = dev->dev_private;
3108 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003109 struct intel_crtc *intel_crtc =
3110 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003111 unsigned long demph_reg_value, preemph_reg_value,
3112 uniqtranscale_reg_value;
3113 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003114 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003115 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003116
3117 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303118 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003119 preemph_reg_value = 0x0004000;
3120 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003122 demph_reg_value = 0x2B405555;
3123 uniqtranscale_reg_value = 0x552AB83A;
3124 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003126 demph_reg_value = 0x2B404040;
3127 uniqtranscale_reg_value = 0x5548B83A;
3128 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003130 demph_reg_value = 0x2B245555;
3131 uniqtranscale_reg_value = 0x5560B83A;
3132 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003134 demph_reg_value = 0x2B405555;
3135 uniqtranscale_reg_value = 0x5598DA3A;
3136 break;
3137 default:
3138 return 0;
3139 }
3140 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303141 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003142 preemph_reg_value = 0x0002000;
3143 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003145 demph_reg_value = 0x2B404040;
3146 uniqtranscale_reg_value = 0x5552B83A;
3147 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003149 demph_reg_value = 0x2B404848;
3150 uniqtranscale_reg_value = 0x5580B83A;
3151 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003153 demph_reg_value = 0x2B404040;
3154 uniqtranscale_reg_value = 0x55ADDA3A;
3155 break;
3156 default:
3157 return 0;
3158 }
3159 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303160 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003161 preemph_reg_value = 0x0000000;
3162 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003164 demph_reg_value = 0x2B305555;
3165 uniqtranscale_reg_value = 0x5570B83A;
3166 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003168 demph_reg_value = 0x2B2B4040;
3169 uniqtranscale_reg_value = 0x55ADDA3A;
3170 break;
3171 default:
3172 return 0;
3173 }
3174 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303175 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003176 preemph_reg_value = 0x0006000;
3177 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003179 demph_reg_value = 0x1B405555;
3180 uniqtranscale_reg_value = 0x55ADDA3A;
3181 break;
3182 default:
3183 return 0;
3184 }
3185 break;
3186 default:
3187 return 0;
3188 }
3189
Ville Syrjäläa5805162015-05-26 20:42:30 +03003190 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003191 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3192 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3193 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003194 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003195 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3196 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3197 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3198 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003199 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003200
3201 return 0;
3202}
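/*
 * A note added for clarity, not part of the original file: unlike the
 * gen4/gen6/gen7 helpers further below, the VLV (and CHV) paths program the
 * requested swing and pre-emphasis entirely through the DPIO sideband writes
 * above. They therefore return 0, and (as far as this code shows) the caller
 * masks in no additional bits on the DP port register for these platforms.
 */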
3203
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003204static bool chv_need_uniq_trans_scale(uint8_t train_set)
3205{
3206 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3207 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3208}
3209
Daniel Vetter5829975c2015-04-16 11:36:52 +02003210static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003211{
3212 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3213 struct drm_i915_private *dev_priv = dev->dev_private;
3214 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3215 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003216 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003217 uint8_t train_set = intel_dp->train_set[0];
3218 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003219 enum pipe pipe = intel_crtc->pipe;
3220 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003221
3222 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303223 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003224 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003226 deemph_reg_value = 128;
3227 margin_reg_value = 52;
3228 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003230 deemph_reg_value = 128;
3231 margin_reg_value = 77;
3232 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003234 deemph_reg_value = 128;
3235 margin_reg_value = 102;
3236 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003238 deemph_reg_value = 128;
3239 margin_reg_value = 154;
3240 /* FIXME extra to set for 1200 */
3241 break;
3242 default:
3243 return 0;
3244 }
3245 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303246 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003247 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003249 deemph_reg_value = 85;
3250 margin_reg_value = 78;
3251 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003253 deemph_reg_value = 85;
3254 margin_reg_value = 116;
3255 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003257 deemph_reg_value = 85;
3258 margin_reg_value = 154;
3259 break;
3260 default:
3261 return 0;
3262 }
3263 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303264 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003265 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003267 deemph_reg_value = 64;
3268 margin_reg_value = 104;
3269 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003271 deemph_reg_value = 64;
3272 margin_reg_value = 154;
3273 break;
3274 default:
3275 return 0;
3276 }
3277 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303278 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003279 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003281 deemph_reg_value = 43;
3282 margin_reg_value = 154;
3283 break;
3284 default:
3285 return 0;
3286 }
3287 break;
3288 default:
3289 return 0;
3290 }
3291
Ville Syrjäläa5805162015-05-26 20:42:30 +03003292 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003293
3294 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003295 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3296 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003297 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3298 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3300
3301 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3302 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003303 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3304 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003305 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003306
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003307 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3308 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3309 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3310 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3311
3312 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3313 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3314 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3316
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003317 /* Program swing deemph */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003318 for (i = 0; i < 4; i++) {
3319 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3320 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3321 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3322 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3323 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003324
3325 /* Program swing margin */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003326 for (i = 0; i < 4; i++) {
3327 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003328
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003329 val &= ~DPIO_SWING_MARGIN000_MASK;
3330 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003331
3332 /*
3333 * Supposedly this value shouldn't matter when unique transition
3334 * scale is disabled, but in fact it does matter. Let's just
3335 * always program the same value and hope it's OK.
3336 */
3337 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3338 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3339
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003340 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3341 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003342
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003343 /*
3344 * The documentation says to set bit 27 for ch0 and bit 26 for ch1,
3345 * which might be a typo in the doc.
3346 * For now, set bit 27 for both ch0 and ch1 when selecting the
3347 * unique transition scale.
3348 */
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003349 for (i = 0; i < 4; i++) {
3350 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003351 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003352 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003353 else
3354 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3355 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003356 }
3357
3358 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003359 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3360 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3361 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3362
3363 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3364 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3365 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003366
3367 /* LRC Bypass */
3368 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3369 val |= DPIO_LRC_BYPASS;
3370 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3371
Ville Syrjäläa5805162015-05-26 20:42:30 +03003372 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003373
3374 return 0;
3375}
3376
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003377static void
Jani Nikula0301b3a2013-10-15 09:36:08 +03003378intel_get_adjust_train(struct intel_dp *intel_dp,
3379 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003380{
3381 uint8_t v = 0;
3382 uint8_t p = 0;
3383 int lane;
Keith Packard1a2eb462011-11-16 16:26:07 -08003384 uint8_t voltage_max;
3385 uint8_t preemph_max;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003386
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003387 for (lane = 0; lane < intel_dp->lane_count; lane++) {
Daniel Vetter0f037bd2012-10-18 10:15:27 +02003388 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3389 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003390
3391 if (this_v > v)
3392 v = this_v;
3393 if (this_p > p)
3394 p = this_p;
3395 }
3396
Keith Packard1a2eb462011-11-16 16:26:07 -08003397 voltage_max = intel_dp_voltage_max(intel_dp);
Keith Packard417e8222011-11-01 19:54:11 -07003398 if (v >= voltage_max)
3399 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003400
Keith Packard1a2eb462011-11-16 16:26:07 -08003401 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3402 if (p >= preemph_max)
3403 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003404
3405 for (lane = 0; lane < 4; lane++)
Jesse Barnes33a34e42010-09-08 12:42:02 -07003406 intel_dp->train_set[lane] = v | p;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003407}
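/*
 * A worked example, not part of the original file: each train_set byte packs
 * the requested voltage swing in bits 1:0 and the pre-emphasis in bits 4:3,
 * plus the two "max reached" flags. For instance
 *
 *	train_set[lane] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 *			  DP_TRAIN_PRE_EMPH_LEVEL_2 |
 *			  DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 *
 * requests swing level 1 with pre-emphasis level 2 and tells the sink that
 * no higher pre-emphasis is available; the *_MAX_*_REACHED flags are how the
 * clamping done above is communicated back to the sink.
 */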
3408
3409static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003410gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003411{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003412 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003413
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003414 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003416 default:
3417 signal_levels |= DP_VOLTAGE_0_4;
3418 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003420 signal_levels |= DP_VOLTAGE_0_6;
3421 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003423 signal_levels |= DP_VOLTAGE_0_8;
3424 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003426 signal_levels |= DP_VOLTAGE_1_2;
3427 break;
3428 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003429 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303430 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003431 default:
3432 signal_levels |= DP_PRE_EMPHASIS_0;
3433 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303434 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003435 signal_levels |= DP_PRE_EMPHASIS_3_5;
3436 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303437 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003438 signal_levels |= DP_PRE_EMPHASIS_6;
3439 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303440 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003441 signal_levels |= DP_PRE_EMPHASIS_9_5;
3442 break;
3443 }
3444 return signal_levels;
3445}
3446
Zhenyu Wange3421a12010-04-08 09:43:27 +08003447/* Gen6's DP voltage swing and pre-emphasis control */
3448static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003449gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003450{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003451 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3452 DP_TRAIN_PRE_EMPHASIS_MASK);
3453 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003456 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003458 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003461 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003464 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003467 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003468 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003469 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3470 			      " 0x%x\n", signal_levels);
3471 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003472 }
3473}
3474
Keith Packard1a2eb462011-11-16 16:26:07 -08003475/* Gen7's DP voltage swing and pre-emphasis control */
3476static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003477gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003478{
3479 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3480 DP_TRAIN_PRE_EMPHASIS_MASK);
3481 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003483 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003485 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003487 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3488
Sonika Jindalbd600182014-08-08 16:23:41 +05303489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003490 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003492 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3493
Sonika Jindalbd600182014-08-08 16:23:41 +05303494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003495 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003497 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3498
3499 default:
3500 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3501 			      " 0x%x\n", signal_levels);
3502 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3503 }
3504}
3505
Paulo Zanonif0a34242012-12-06 16:51:50 -02003506/* Properly updates "DP" with the correct signal levels. */
3507static void
3508intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509{
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003511 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003512 struct drm_device *dev = intel_dig_port->base.base.dev;
David Weinehallf8896f52015-06-25 11:11:03 +03003513 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003514 uint8_t train_set = intel_dp->train_set[0];
3515
David Weinehallf8896f52015-06-25 11:11:03 +03003516 if (HAS_DDI(dev)) {
3517 signal_levels = ddi_signal_levels(intel_dp);
3518
3519 if (IS_BROXTON(dev))
3520 signal_levels = 0;
3521 else
3522 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003523 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003524 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003525 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003526 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003527 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003528 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003529 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003530 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003531 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003532 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3533 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003534 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003535 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3536 }
3537
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303538 if (mask)
3539 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3540
3541 DRM_DEBUG_KMS("Using vswing level %d\n",
3542 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3543 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3544 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3545 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003546
3547 *DP = (*DP & ~mask) | signal_levels;
3548}
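/*
 * Illustrative, standalone sketch (not part of i915): the read-modify-write
 * at the end of intel_dp_set_signal_levels() above -- clear only the
 * platform-specific vswing/pre-emphasis field (the "mask") and merge in the
 * freshly computed levels, leaving the rest of the port register untouched.
 * Names and register values here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_update_field(uint32_t reg, uint32_t mask, uint32_t value)
{
	return (reg & ~mask) | (value & mask);
}

int main(void)
{
	uint32_t dp = 0x80000043;	/* pretend port register contents */
	uint32_t updated = ex_update_field(dp, 0x00000038, 0x00000028);

	printf("0x%08x -> 0x%08x\n", dp, updated);
	return 0;
}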
3549
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003550static bool
Chris Wilsonea5b2132010-08-04 13:50:23 +01003551intel_dp_set_link_train(struct intel_dp *intel_dp,
Jani Nikula70aff662013-09-27 15:10:44 +03003552 uint32_t *DP,
Chris Wilson58e10eb2010-10-03 10:56:11 +01003553 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003554{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003556 struct drm_i915_private *dev_priv =
3557 to_i915(intel_dig_port->base.base.dev);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003558 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3559 int ret, len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003560
Ville Syrjälä7b13b582014-08-18 22:16:08 +03003561 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003562
Jani Nikula70aff662013-09-27 15:10:44 +03003563 I915_WRITE(intel_dp->output_reg, *DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003564 POSTING_READ(intel_dp->output_reg);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003565
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003566 buf[0] = dp_train_pat;
3567 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003568 DP_TRAINING_PATTERN_DISABLE) {
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003569 /* don't write DP_TRAINING_LANEx_SET on disable */
3570 len = 1;
3571 } else {
3572 		/* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003573 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3574 len = intel_dp->lane_count + 1;
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003575 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003576
Jani Nikula9d1a1032014-03-14 16:51:15 +02003577 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3578 buf, len);
Jani Nikula2cdfe6c2013-10-04 15:08:48 +03003579
3580 return ret == len;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003581}
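/*
 * Illustrative, standalone sketch (not part of i915): building the single
 * AUX write used by intel_dp_set_link_train() above.  Per the DP spec,
 * TRAINING_PATTERN_SET sits at DPCD 0x102 and the per-lane
 * TRAINING_LANEx_SET registers follow it at 0x103..0x106, so one contiguous
 * write of 1 + lane_count bytes covers both, which is why the driver builds
 * a single buffer.  "ex_" names and the sample values are invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_TRAINING_PATTERN_DISABLE 0

static size_t ex_build_training_write(uint8_t *buf, uint8_t pattern,
				      const uint8_t *train_set, int lane_count)
{
	buf[0] = pattern;
	if (pattern == EX_TRAINING_PATTERN_DISABLE)
		return 1;			/* no lane bytes on disable */

	memcpy(buf + 1, train_set, lane_count);	/* lane settings follow the pattern byte */
	return 1 + lane_count;
}

int main(void)
{
	uint8_t train_set[4] = { 0x0a, 0x0a, 0x0a, 0x0a };
	uint8_t buf[5];
	size_t len = ex_build_training_write(buf, 0x21 /* sample: TP1, scrambling off */,
					     train_set, 4);

	printf("would write %zu byte(s) starting at DPCD 0x102\n", len);
	return 0;
}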
3582
Jani Nikula70aff662013-09-27 15:10:44 +03003583static bool
3584intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3585 uint8_t dp_train_pat)
3586{
Mika Kahola4e96c972015-04-29 09:17:39 +03003587 if (!intel_dp->train_set_valid)
3588 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
Jani Nikula70aff662013-09-27 15:10:44 +03003589 intel_dp_set_signal_levels(intel_dp, DP);
3590 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3591}
3592
3593static bool
3594intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
Jani Nikula0301b3a2013-10-15 09:36:08 +03003595 const uint8_t link_status[DP_LINK_STATUS_SIZE])
Jani Nikula70aff662013-09-27 15:10:44 +03003596{
3597 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003598 struct drm_i915_private *dev_priv =
3599 to_i915(intel_dig_port->base.base.dev);
Jani Nikula70aff662013-09-27 15:10:44 +03003600 int ret;
3601
3602 intel_get_adjust_train(intel_dp, link_status);
3603 intel_dp_set_signal_levels(intel_dp, DP);
3604
3605 I915_WRITE(intel_dp->output_reg, *DP);
3606 POSTING_READ(intel_dp->output_reg);
3607
Jani Nikula9d1a1032014-03-14 16:51:15 +02003608 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003609 intel_dp->train_set, intel_dp->lane_count);
Jani Nikula70aff662013-09-27 15:10:44 +03003610
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003611 return ret == intel_dp->lane_count;
Jani Nikula70aff662013-09-27 15:10:44 +03003612}
3613
Imre Deak3ab9c632013-05-03 12:57:41 +03003614static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3615{
3616 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3617 struct drm_device *dev = intel_dig_port->base.base.dev;
3618 struct drm_i915_private *dev_priv = dev->dev_private;
3619 enum port port = intel_dig_port->port;
3620 uint32_t val;
3621
3622 if (!HAS_DDI(dev))
3623 return;
3624
3625 val = I915_READ(DP_TP_CTL(port));
3626 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3627 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3628 I915_WRITE(DP_TP_CTL(port), val);
3629
3630 /*
3631 * On PORT_A we can have only eDP in SST mode. There the only reason
3632 * we need to set idle transmission mode is to work around a HW issue
3633 * where we enable the pipe while not in idle link-training mode.
3634 	 * In this case there is a requirement to wait for a minimum number of
3635 * idle patterns to be sent.
3636 */
3637 if (port == PORT_A)
3638 return;
3639
3640 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3641 1))
3642 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3643}
3644
Jesse Barnes33a34e42010-09-08 12:42:02 -07003645/* Enable corresponding port and start training pattern 1 */
Paulo Zanonic19b0662012-10-15 15:51:41 -03003646void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003647intel_dp_start_link_train(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003648{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003649 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
Paulo Zanonic19b0662012-10-15 15:51:41 -03003650 struct drm_device *dev = encoder->dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003651 int i;
3652 uint8_t voltage;
Keith Packardcdb0e952011-11-01 20:00:06 -07003653 int voltage_tries, loop_tries;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003654 uint32_t DP = intel_dp->DP;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003655 uint8_t link_config[2];
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003656 uint8_t link_bw, rate_select;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003657
Paulo Zanoniaffa9352012-11-23 15:30:39 -02003658 if (HAS_DDI(dev))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003659 intel_ddi_prepare_link_retrain(encoder);
3660
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003661 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003662 &link_bw, &rate_select);
3663
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003664 /* Write the link configuration data */
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003665 link_config[0] = link_bw;
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003666 link_config[1] = intel_dp->lane_count;
Jani Nikula6aba5b62013-10-04 15:08:10 +03003667 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3668 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003670 if (intel_dp->num_sink_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05303671 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
Ville Syrjälä04a60f92015-07-06 15:10:06 +03003672 &rate_select, 1);
Jani Nikula6aba5b62013-10-04 15:08:10 +03003673
3674 link_config[0] = 0;
3675 link_config[1] = DP_SET_ANSI_8B10B;
Jani Nikula9d1a1032014-03-14 16:51:15 +02003676 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003677
3678 DP |= DP_PORT_EN;
Keith Packard1a2eb462011-11-16 16:26:07 -08003679
Jani Nikula70aff662013-09-27 15:10:44 +03003680 /* clock recovery */
3681 if (!intel_dp_reset_link_train(intel_dp, &DP,
3682 DP_TRAINING_PATTERN_1 |
3683 DP_LINK_SCRAMBLING_DISABLE)) {
3684 DRM_ERROR("failed to enable link training\n");
3685 return;
3686 }
3687
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003688 voltage = 0xff;
Keith Packardcdb0e952011-11-01 20:00:06 -07003689 voltage_tries = 0;
3690 loop_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003691 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003692 uint8_t link_status[DP_LINK_STATUS_SIZE];
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003693
Daniel Vettera7c96552012-10-18 10:15:30 +02003694 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
Keith Packard93f62da2011-11-01 19:45:03 -07003695 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3696 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003697 break;
Keith Packard93f62da2011-11-01 19:45:03 -07003698 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003699
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003700 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
Keith Packard93f62da2011-11-01 19:45:03 -07003701 DRM_DEBUG_KMS("clock recovery OK\n");
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003702 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003703 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003704
Mika Kahola4e96c972015-04-29 09:17:39 +03003705 /*
3706 * if we used previously trained voltage and pre-emphasis values
3707 * and we don't get clock recovery, reset link training values
3708 */
3709 if (intel_dp->train_set_valid) {
3710 DRM_DEBUG_KMS("clock recovery not ok, reset");
3711 /* clear the flag as we are not reusing train set */
3712 intel_dp->train_set_valid = false;
3713 if (!intel_dp_reset_link_train(intel_dp, &DP,
3714 DP_TRAINING_PATTERN_1 |
3715 DP_LINK_SCRAMBLING_DISABLE)) {
3716 DRM_ERROR("failed to enable link training\n");
3717 return;
3718 }
3719 continue;
3720 }
3721
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003722 /* Check to see if we've tried the max voltage */
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003723 for (i = 0; i < intel_dp->lane_count; i++)
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003724 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3725 break;
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003726 if (i == intel_dp->lane_count) {
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003727 ++loop_tries;
3728 if (loop_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003729 DRM_ERROR("too many full retries, give up\n");
Keith Packardcdb0e952011-11-01 20:00:06 -07003730 break;
3731 }
Jani Nikula70aff662013-09-27 15:10:44 +03003732 intel_dp_reset_link_train(intel_dp, &DP,
3733 DP_TRAINING_PATTERN_1 |
3734 DP_LINK_SCRAMBLING_DISABLE);
Keith Packardcdb0e952011-11-01 20:00:06 -07003735 voltage_tries = 0;
3736 continue;
3737 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003738
3739 /* Check to see if we've tried the same voltage 5 times */
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003740 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
Chris Wilson24773672012-09-26 16:48:30 +01003741 ++voltage_tries;
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003742 if (voltage_tries == 5) {
Jani Nikula3def84b2013-10-05 16:13:56 +03003743 DRM_ERROR("too many voltage retries, give up\n");
Daniel Vetterb06fbda2012-10-16 09:50:25 +02003744 break;
3745 }
3746 } else
3747 voltage_tries = 0;
3748 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003749
Jani Nikula70aff662013-09-27 15:10:44 +03003750 /* Update training set as requested by target */
3751 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3752 DRM_ERROR("failed to update link training\n");
3753 break;
3754 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003755 }
3756
Jesse Barnes33a34e42010-09-08 12:42:02 -07003757 intel_dp->DP = DP;
3758}
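/*
 * Illustrative, standalone sketch (not part of i915): the retry bookkeeping
 * of the clock-recovery loop in intel_dp_start_link_train() above, with the
 * hardware programming and AUX traffic replaced by a stub.  ex_cr_ok()
 * stands in for intel_dp_get_link_status() + drm_dp_clock_recovery_ok();
 * all "ex_" names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stub: pretend clock recovery only locks once we reach swing level 2 */
static bool ex_cr_ok(uint8_t vswing)
{
	return vswing >= 2;
}

static bool ex_clock_recovery(void)
{
	uint8_t vswing = 0, last_vswing = 0xff;
	int voltage_tries = 0, loop_tries = 0;

	for (;;) {
		if (ex_cr_ok(vswing))
			return true;		/* clock recovery OK */

		if (vswing == 3) {		/* all lanes already at max swing */
			if (++loop_tries == 5)
				return false;	/* too many full retries */
			vswing = 0;		/* reset settings, restart TP1 */
			voltage_tries = 0;
			continue;
		}

		if (vswing == last_vswing) {
			if (++voltage_tries == 5)
				return false;	/* same swing failed 5 times */
		} else {
			voltage_tries = 0;
		}
		last_vswing = vswing;

		vswing++;			/* adopt the sink's adjustment request */
	}
}

int main(void)
{
	printf("clock recovery %s\n", ex_clock_recovery() ? "ok" : "failed");
	return 0;
}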
3759
Paulo Zanonic19b0662012-10-15 15:51:41 -03003760void
Jesse Barnes33a34e42010-09-08 12:42:02 -07003761intel_dp_complete_link_train(struct intel_dp *intel_dp)
3762{
Jesse Barnes33a34e42010-09-08 12:42:02 -07003763 bool channel_eq = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003764 int tries, cr_tries;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003765 uint32_t DP = intel_dp->DP;
Todd Previte06ea66b2014-01-20 10:19:39 -07003766 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3767
Ville Syrjäläa79b8162015-07-06 15:10:05 +03003768	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003769 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
Todd Previte06ea66b2014-01-20 10:19:39 -07003770 training_pattern = DP_TRAINING_PATTERN_3;
Jesse Barnes33a34e42010-09-08 12:42:02 -07003771
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003772 /* channel equalization */
Jani Nikula70aff662013-09-27 15:10:44 +03003773 if (!intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003774 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003775 DP_LINK_SCRAMBLING_DISABLE)) {
3776 DRM_ERROR("failed to start channel equalization\n");
3777 return;
3778 }
3779
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003780 tries = 0;
Jesse Barnes37f80972011-01-05 14:45:24 -08003781 cr_tries = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003782 channel_eq = false;
3783 for (;;) {
Jani Nikula70aff662013-09-27 15:10:44 +03003784 uint8_t link_status[DP_LINK_STATUS_SIZE];
Zhenyu Wange3421a12010-04-08 09:43:27 +08003785
Jesse Barnes37f80972011-01-05 14:45:24 -08003786 if (cr_tries > 5) {
3787 DRM_ERROR("failed to train DP, aborting\n");
Jesse Barnes37f80972011-01-05 14:45:24 -08003788 break;
3789 }
3790
Daniel Vettera7c96552012-10-18 10:15:30 +02003791 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
Jani Nikula70aff662013-09-27 15:10:44 +03003792 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3793 DRM_ERROR("failed to get link status\n");
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003794 break;
Jani Nikula70aff662013-09-27 15:10:44 +03003795 }
Jesse Barnes869184a2010-10-07 16:01:22 -07003796
Jesse Barnes37f80972011-01-05 14:45:24 -08003797 /* Make sure clock is still ok */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003798 if (!drm_dp_clock_recovery_ok(link_status,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003799 intel_dp->lane_count)) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003800 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003801 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003802 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003803 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003804 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003805 cr_tries++;
3806 continue;
3807 }
3808
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003809 if (drm_dp_channel_eq_ok(link_status,
Ville Syrjälä901c2da2015-08-17 18:05:12 +03003810 intel_dp->lane_count)) {
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003811 channel_eq = true;
3812 break;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003813 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003814
Jesse Barnes37f80972011-01-05 14:45:24 -08003815 /* Try 5 times, then try clock recovery if that fails */
3816 if (tries > 5) {
Mika Kahola4e96c972015-04-29 09:17:39 +03003817 intel_dp->train_set_valid = false;
Jesse Barnes37f80972011-01-05 14:45:24 -08003818 intel_dp_start_link_train(intel_dp);
Jani Nikula70aff662013-09-27 15:10:44 +03003819 intel_dp_set_link_train(intel_dp, &DP,
Todd Previte06ea66b2014-01-20 10:19:39 -07003820 training_pattern |
Jani Nikula70aff662013-09-27 15:10:44 +03003821 DP_LINK_SCRAMBLING_DISABLE);
Jesse Barnes37f80972011-01-05 14:45:24 -08003822 tries = 0;
3823 cr_tries++;
3824 continue;
3825 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003826
Jani Nikula70aff662013-09-27 15:10:44 +03003827 /* Update training set as requested by target */
3828 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3829 DRM_ERROR("failed to update link training\n");
3830 break;
3831 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003832 ++tries;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003833 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003834
Imre Deak3ab9c632013-05-03 12:57:41 +03003835 intel_dp_set_idle_link_train(intel_dp);
3836
3837 intel_dp->DP = DP;
3838
Mika Kahola4e96c972015-04-29 09:17:39 +03003839 if (channel_eq) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03003840 intel_dp->train_set_valid = true;
Masanari Iida07f42252013-03-20 11:00:34 +09003841 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
Mika Kahola4e96c972015-04-29 09:17:39 +03003842 }
Imre Deak3ab9c632013-05-03 12:57:41 +03003843}
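/*
 * Illustrative, standalone sketch (not part of i915): the decision ladder of
 * the channel-equalization loop in intel_dp_complete_link_train() above.
 * ex_link_status() is a stub standing in for the DPCD lane-status read; the
 * two _ok flags play the role of drm_dp_clock_recovery_ok() /
 * drm_dp_channel_eq_ok().  All "ex_" names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct ex_status { bool cr_ok; bool eq_ok; };

/* stub: clock recovery stays locked, EQ succeeds on the fourth poll */
static struct ex_status ex_link_status(int poll)
{
	struct ex_status s = { .cr_ok = true, .eq_ok = poll >= 3 };
	return s;
}

static bool ex_channel_eq(void)
{
	int tries = 0, cr_tries = 0, poll = 0;

	for (;;) {
		struct ex_status s;

		if (cr_tries > 5)
			return false;		/* give up entirely */

		s = ex_link_status(poll++);

		if (!s.cr_ok) {			/* lost clock recovery: full retrain */
			cr_tries++;
			tries = 0;
			continue;
		}
		if (s.eq_ok)
			return true;		/* channel EQ done */

		if (tries > 5) {		/* EQ not converging: full retrain */
			cr_tries++;
			tries = 0;
			continue;
		}

		tries++;			/* apply sink's adjustment, poll again */
	}
}

int main(void)
{
	printf("channel EQ %s\n", ex_channel_eq() ? "done" : "failed");
	return 0;
}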
3844
3845void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3846{
Jani Nikula70aff662013-09-27 15:10:44 +03003847 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
Imre Deak3ab9c632013-05-03 12:57:41 +03003848 DP_TRAINING_PATTERN_DISABLE);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003849}
3850
3851static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003852intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003853{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003854 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003855 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003856 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003857 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003858 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003859 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003860
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003861 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003862 return;
3863
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003864 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003865 return;
3866
Zhao Yakui28c97732009-10-09 11:39:41 +08003867 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003868
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003869 if ((IS_GEN7(dev) && port == PORT_A) ||
3870 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003871 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003872 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003873 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003874 if (IS_CHERRYVIEW(dev))
3875 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3876 else
3877 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003878 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003879 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003880 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003881 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003882
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003883 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3884 I915_WRITE(intel_dp->output_reg, DP);
3885 POSTING_READ(intel_dp->output_reg);
3886
3887 /*
3888 * HW workaround for IBX, we need to move the port
3889 * to transcoder A after disabling it to allow the
3890 * matching HDMI port to be enabled on transcoder A.
3891 */
3892 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3893 /* always enable with pattern 1 (as per spec) */
3894 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3895 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3896 I915_WRITE(intel_dp->output_reg, DP);
3897 POSTING_READ(intel_dp->output_reg);
3898
3899 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003900 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003901 POSTING_READ(intel_dp->output_reg);
Eric Anholt5bddd172010-11-18 09:32:59 +08003902 }
3903
Keith Packardf01eca22011-09-28 16:48:10 -07003904 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003905}
3906
Keith Packard26d61aa2011-07-25 20:01:09 -07003907static bool
3908intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003909{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003910 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3911 struct drm_device *dev = dig_port->base.base.dev;
3912 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303913 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003914
Jani Nikula9d1a1032014-03-14 16:51:15 +02003915 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3916 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003917 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003918
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003919 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003920
Adam Jacksonedb39242012-09-18 10:58:49 -04003921 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3922 return false; /* DPCD not present */
3923
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003924 /* Check if the panel supports PSR */
3925 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003926 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003927 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3928 intel_dp->psr_dpcd,
3929 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003930 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3931 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003932 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003933 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303934
3935 if (INTEL_INFO(dev)->gen >= 9 &&
3936 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3937 uint8_t frame_sync_cap;
3938
3939 dev_priv->psr.sink_support = true;
3940 intel_dp_dpcd_read_wake(&intel_dp->aux,
3941 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3942 &frame_sync_cap, 1);
3943 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3944 /* PSR2 needs frame sync as well */
3945 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3946 DRM_DEBUG_KMS("PSR2 %s on sink",
3947 dev_priv->psr.psr2_support ? "supported" : "not supported");
3948 }
Jani Nikula50003932013-09-20 16:42:17 +03003949 }
3950
Jani Nikula7809a612014-10-29 11:03:26 +02003951 /* Training Pattern 3 support, both source and sink */
Todd Previte06ea66b2014-01-20 10:19:39 -07003952 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
Jani Nikula7809a612014-10-29 11:03:26 +02003953 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3954 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
Todd Previte06ea66b2014-01-20 10:19:39 -07003955 intel_dp->use_tps3 = true;
Jani Nikulaf8d8a672014-09-05 16:19:18 +03003956 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
Todd Previte06ea66b2014-01-20 10:19:39 -07003957 } else
3958 intel_dp->use_tps3 = false;
3959
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303960 /* Intermediate frequency support */
3961 if (is_edp(intel_dp) &&
3962 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3963 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3964 	    (rev >= 0x03)) { /* eDP 1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003965 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003966 int i;
3967
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303968 intel_dp_dpcd_read_wake(&intel_dp->aux,
3969 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003970 sink_rates,
3971 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003972
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003973 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3974 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003975
3976 if (val == 0)
3977 break;
3978
Sonika Jindalaf77b972015-05-07 13:59:28 +05303979 			/* DPCD value is in 200 kHz units; drm clock is stored in deca-kHz */
3980 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003981 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003982 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303983 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003984
3985 intel_dp_print_rates(intel_dp);
3986
Adam Jacksonedb39242012-09-18 10:58:49 -04003987 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3988 DP_DWN_STRM_PORT_PRESENT))
3989 return true; /* native DP sink */
3990
3991 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3992 return true; /* no per-port downstream info */
3993
Jani Nikula9d1a1032014-03-14 16:51:15 +02003994 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3995 intel_dp->downstream_ports,
3996 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003997 return false; /* downstream port status fetch failed */
3998
3999 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07004000}
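/*
 * Illustrative, standalone sketch (not part of i915): parsing the eDP 1.4
 * link-rate table the way the loop in intel_dp_get_dpcd() above does.
 * Entries are 16-bit little-endian values in units of 200 kHz and a zero
 * entry terminates the table; the driver keeps rates in 10 kHz ("deca-kHz")
 * units.  "ex_" names and the sample bytes are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_SUPPORTED_RATES 8

static int ex_parse_sink_rates(const uint8_t *raw, int *rates_10khz)
{
	int i;

	for (i = 0; i < EX_MAX_SUPPORTED_RATES; i++) {
		/* assemble the little-endian 16-bit entry by hand */
		uint16_t val = raw[2 * i] | (raw[2 * i + 1] << 8);

		if (val == 0)
			break;

		rates_10khz[i] = (val * 200) / 10;	/* 200 kHz units -> 10 kHz units */
	}
	return i;					/* number of valid entries */
}

int main(void)
{
	/* two entries: 8100 (= 1.62 GHz) and 13500 (= 2.7 GHz), then a zero terminator */
	const uint8_t raw[EX_MAX_SUPPORTED_RATES * 2] = {
		0xa4, 0x1f, 0xbc, 0x34, 0x00, 0x00,
	};
	int rates[EX_MAX_SUPPORTED_RATES];
	int n = ex_parse_sink_rates(raw, rates);

	for (int i = 0; i < n; i++)
		printf("sink rate %d: %d (10 kHz units)\n", i, rates[i]);
	return 0;
}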
4001
Adam Jackson0d198322012-05-14 16:05:47 -04004002static void
4003intel_dp_probe_oui(struct intel_dp *intel_dp)
4004{
4005 u8 buf[3];
4006
4007 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4008 return;
4009
Jani Nikula9d1a1032014-03-14 16:51:15 +02004010 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04004011 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4012 buf[0], buf[1], buf[2]);
4013
Jani Nikula9d1a1032014-03-14 16:51:15 +02004014 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04004015 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4016 buf[0], buf[1], buf[2]);
4017}
4018
Dave Airlie0e32b392014-05-02 14:02:48 +10004019static bool
4020intel_dp_probe_mst(struct intel_dp *intel_dp)
4021{
4022 u8 buf[1];
4023
4024 if (!intel_dp->can_mst)
4025 return false;
4026
4027 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4028 return false;
4029
Dave Airlie0e32b392014-05-02 14:02:48 +10004030 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4031 if (buf[0] & DP_MST_CAP) {
4032 DRM_DEBUG_KMS("Sink is MST capable\n");
4033 intel_dp->is_mst = true;
4034 } else {
4035 DRM_DEBUG_KMS("Sink is not MST capable\n");
4036 intel_dp->is_mst = false;
4037 }
4038 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004039
4040 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4041 return intel_dp->is_mst;
4042}
4043
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004044static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004045{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004046 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4047 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004048 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004049 int ret = 0;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004050
4051 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004052 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004053 ret = -EIO;
4054 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004055 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004056
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004057 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004058 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004059 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004060 ret = -EIO;
4061 goto out;
4062 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004063
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004064 intel_dp->sink_crc.started = false;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004065 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004066 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004067 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004068}
4069
4070static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4071{
4072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4073 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4074 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004075 int ret;
4076
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004077 if (intel_dp->sink_crc.started) {
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07004078 ret = intel_dp_sink_crc_stop(intel_dp);
4079 if (ret)
4080 return ret;
4081 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004082
4083 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4084 return -EIO;
4085
4086 if (!(buf & DP_TEST_CRC_SUPPORTED))
4087 return -ENOTTY;
4088
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004089 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4090
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004091 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4092 return -EIO;
4093
4094 hsw_disable_ips(intel_crtc);
4095
4096 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4097 buf | DP_TEST_SINK_START) < 0) {
4098 hsw_enable_ips(intel_crtc);
4099 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004100 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004101
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004102 intel_dp->sink_crc.started = true;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004103 return 0;
4104}
4105
4106int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4107{
4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4109 struct drm_device *dev = dig_port->base.base.dev;
4110 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4111 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004112 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004113 int attempts = 6;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004114 bool old_equal_new;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004115
4116 ret = intel_dp_sink_crc_start(intel_dp);
4117 if (ret)
4118 return ret;
4119
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004120 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004121 intel_wait_for_vblank(dev, intel_crtc->pipe);
4122
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07004123 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004124 DP_TEST_SINK_MISC, &buf) < 0) {
4125 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004126 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004127 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004128 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004129
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004130 /*
4131 * Count might be reset during the loop. In this case
4132 * last known count needs to be reset as well.
4133 */
4134 if (count == 0)
4135 intel_dp->sink_crc.last_count = 0;
4136
4137 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4138 ret = -EIO;
4139 goto stop;
4140 }
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004141
4142 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4143 !memcmp(intel_dp->sink_crc.last_crc, crc,
4144 6 * sizeof(u8)));
4145
4146 } while (--attempts && (count == 0 || old_equal_new));
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07004147
4148 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4149 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004150
4151 if (attempts == 0) {
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07004152 if (old_equal_new) {
4153 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4154 } else {
4155 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4156 ret = -ETIMEDOUT;
4157 goto stop;
4158 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04004159 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004160
Rodrigo Viviafe0d672015-07-23 16:35:45 -07004161stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07004162 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03004163 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004164}
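/*
 * Illustrative, standalone sketch (not part of i915): the bounded
 * "poll until the result is fresh" pattern used by intel_dp_sink_crc()
 * above.  ex_read_crc() is a stub for the DPCD reads of TEST_SINK_MISC and
 * the CRC bytes; in the driver the wait between attempts is a vblank.
 * All "ex_" names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ex_crc_sample { uint8_t count; uint8_t crc[6]; };

/* stub: the sink publishes a new CRC on the fourth poll */
static struct ex_crc_sample ex_read_crc(int poll)
{
	struct ex_crc_sample s = { .count = (uint8_t)(poll >= 3 ? 2 : 1),
				   .crc = { 0x11, 0x22, 0x33, 0x44, 0x55,
					    (uint8_t)(poll >= 3 ? 0x77 : 0x66) } };
	return s;
}

static int ex_get_fresh_crc(uint8_t last_count, const uint8_t *last_crc,
			    uint8_t *out_crc)
{
	int attempts = 6, poll = 0;
	struct ex_crc_sample s;
	bool old_equal_new;

	do {
		/* a real implementation would wait for a vblank here */
		s = ex_read_crc(poll++);
		old_equal_new = (s.count == last_count &&
				 !memcmp(last_crc, s.crc, 6));
	} while (--attempts && (s.count == 0 || old_equal_new));

	if (attempts == 0)
		return -1;			/* counter stuck or CRC never changed */

	memcpy(out_crc, s.crc, 6);
	return 0;
}

int main(void)
{
	uint8_t last[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }, crc[6];

	if (ex_get_fresh_crc(1, last, crc) == 0)
		printf("got fresh CRC, last byte 0x%02x\n", crc[5]);
	return 0;
}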
4165
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004166static bool
4167intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4168{
Jani Nikula9d1a1032014-03-14 16:51:15 +02004169 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4170 DP_DEVICE_SERVICE_IRQ_VECTOR,
4171 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004172}
4173
Dave Airlie0e32b392014-05-02 14:02:48 +10004174static bool
4175intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4176{
4177 int ret;
4178
4179 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4180 DP_SINK_COUNT_ESI,
4181 sink_irq_vector, 14);
4182 if (ret != 14)
4183 return false;
4184
4185 return true;
4186}
4187
Todd Previtec5d5ab72015-04-15 08:38:38 -07004188static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004189{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004190 uint8_t test_result = DP_TEST_ACK;
4191 return test_result;
4192}
4193
4194static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4195{
4196 uint8_t test_result = DP_TEST_NAK;
4197 return test_result;
4198}
4199
4200static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4201{
4202 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004203 struct intel_connector *intel_connector = intel_dp->attached_connector;
4204 struct drm_connector *connector = &intel_connector->base;
4205
4206 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004207 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004208 intel_dp->aux.i2c_defer_count > 6) {
4209 /* Check EDID read for NACKs, DEFERs and corruption
4210 * (DP CTS 1.2 Core r1.1)
4211 * 4.2.2.4 : Failed EDID read, I2C_NAK
4212 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4213 * 4.2.2.6 : EDID corruption detected
4214 * Use failsafe mode for all cases
4215 */
4216 if (intel_dp->aux.i2c_nack_count > 0 ||
4217 intel_dp->aux.i2c_defer_count > 0)
4218 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4219 intel_dp->aux.i2c_nack_count,
4220 intel_dp->aux.i2c_defer_count);
4221 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4222 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304223 struct edid *block = intel_connector->detect_edid;
4224
4225 /* We have to write the checksum
4226 * of the last block read
4227 */
4228 block += intel_connector->detect_edid->extensions;
4229
Todd Previte559be302015-05-04 07:48:20 -07004230 if (!drm_dp_dpcd_write(&intel_dp->aux,
4231 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304232 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004233 1))
Todd Previte559be302015-05-04 07:48:20 -07004234 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4235
4236 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4237 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4238 }
4239
4240 /* Set test active flag here so userspace doesn't interrupt things */
4241 intel_dp->compliance_test_active = 1;
4242
Todd Previtec5d5ab72015-04-15 08:38:38 -07004243 return test_result;
4244}
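/*
 * Illustrative, standalone sketch (not part of i915): locating and checking
 * the checksum byte that intel_dp_autotest_edid() above writes back to
 * DP_TEST_EDID_CHECKSUM.  EDID blocks are 128 bytes; byte 126 of the base
 * block holds the extension count and byte 127 of every block makes the
 * block sum to 0 (mod 256).  "ex_" names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_EDID_BLOCK_SIZE 128

/* checksum of the last block: base block index 0 + extension count */
static uint8_t ex_last_block_checksum(const uint8_t *edid)
{
	unsigned int last = edid[126];

	return edid[last * EX_EDID_BLOCK_SIZE + (EX_EDID_BLOCK_SIZE - 1)];
}

static int ex_block_valid(const uint8_t *block)
{
	uint8_t sum = 0;

	for (int i = 0; i < EX_EDID_BLOCK_SIZE; i++)
		sum = (uint8_t)(sum + block[i]);
	return sum == 0;
}

int main(void)
{
	static const uint8_t header[8] = { 0x00, 0xff, 0xff, 0xff,
					   0xff, 0xff, 0xff, 0x00 };
	uint8_t edid[EX_EDID_BLOCK_SIZE] = { 0 };
	uint8_t sum = 0;

	memcpy(edid, header, sizeof(header));
	edid[126] = 0;				/* no extension blocks in this toy EDID */
	for (int i = 0; i < EX_EDID_BLOCK_SIZE - 1; i++)
		sum = (uint8_t)(sum + edid[i]);
	edid[127] = (uint8_t)(0x100 - sum);	/* fix up the checksum */

	printf("last-block checksum 0x%02x, block %s\n",
	       ex_last_block_checksum(edid),
	       ex_block_valid(edid) ? "valid" : "corrupt");
	return 0;
}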
4245
4246static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4247{
4248 uint8_t test_result = DP_TEST_NAK;
4249 return test_result;
4250}
4251
4252static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4253{
4254 uint8_t response = DP_TEST_NAK;
4255 uint8_t rxdata = 0;
4256 int status = 0;
4257
Todd Previte559be302015-05-04 07:48:20 -07004258 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004259 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004260 intel_dp->compliance_test_data = 0;
4261
Todd Previtec5d5ab72015-04-15 08:38:38 -07004262 intel_dp->aux.i2c_nack_count = 0;
4263 intel_dp->aux.i2c_defer_count = 0;
4264
4265 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4266 if (status <= 0) {
4267 DRM_DEBUG_KMS("Could not read test request from sink\n");
4268 goto update_status;
4269 }
4270
4271 switch (rxdata) {
4272 case DP_TEST_LINK_TRAINING:
4273 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4274 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4275 response = intel_dp_autotest_link_training(intel_dp);
4276 break;
4277 case DP_TEST_LINK_VIDEO_PATTERN:
4278 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4279 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4280 response = intel_dp_autotest_video_pattern(intel_dp);
4281 break;
4282 case DP_TEST_LINK_EDID_READ:
4283 DRM_DEBUG_KMS("EDID test requested\n");
4284 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4285 response = intel_dp_autotest_edid(intel_dp);
4286 break;
4287 case DP_TEST_LINK_PHY_TEST_PATTERN:
4288 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4289 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4290 response = intel_dp_autotest_phy_pattern(intel_dp);
4291 break;
4292 default:
4293 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4294 break;
4295 }
4296
4297update_status:
4298 status = drm_dp_dpcd_write(&intel_dp->aux,
4299 DP_TEST_RESPONSE,
4300 &response, 1);
4301 if (status <= 0)
4302 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004303}
4304
Dave Airlie0e32b392014-05-02 14:02:48 +10004305static int
4306intel_dp_check_mst_status(struct intel_dp *intel_dp)
4307{
4308 bool bret;
4309
4310 if (intel_dp->is_mst) {
4311 u8 esi[16] = { 0 };
4312 int ret = 0;
4313 int retry;
4314 bool handled;
4315 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4316go_again:
4317 if (bret == true) {
4318
4319 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004320 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004321 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004322 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4323 intel_dp_start_link_train(intel_dp);
4324 intel_dp_complete_link_train(intel_dp);
4325 intel_dp_stop_link_train(intel_dp);
4326 }
4327
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004328 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004329 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4330
4331 if (handled) {
4332 for (retry = 0; retry < 3; retry++) {
4333 int wret;
4334 wret = drm_dp_dpcd_write(&intel_dp->aux,
4335 DP_SINK_COUNT_ESI+1,
4336 &esi[1], 3);
4337 if (wret == 3) {
4338 break;
4339 }
4340 }
4341
4342 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4343 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004344 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004345 goto go_again;
4346 }
4347 } else
4348 ret = 0;
4349
4350 return ret;
4351 } else {
4352 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4353 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4354 intel_dp->is_mst = false;
4355 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4356 /* send a hotplug event */
4357 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4358 }
4359 }
4360 return -EINVAL;
4361}
4362
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004363/*
4364 * According to DP spec
4365 * 5.1.2:
4366 * 1. Read DPCD
4367 * 2. Configure link according to Receiver Capabilities
4368 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4369 * 4. Check link status on receipt of hot-plug interrupt
4370 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004371static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004372intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004373{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004375 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004376 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004377 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004378
Dave Airlie5b215bc2014-08-05 10:40:20 +10004379 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4380
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004381 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004382 return;
4383
Imre Deak1a125d82014-08-18 14:42:46 +03004384 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4385 return;
4386
Keith Packard92fd8fd2011-07-25 19:50:10 -07004387 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004388 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004389 return;
4390 }
4391
Keith Packard92fd8fd2011-07-25 19:50:10 -07004392 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004393 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004394 return;
4395 }
4396
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004397 /* Try to read the source of the interrupt */
4398 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4399 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4400 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004401 drm_dp_dpcd_writeb(&intel_dp->aux,
4402 DP_DEVICE_SERVICE_IRQ_VECTOR,
4403 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004404
4405 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004406 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004407 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4408 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4409 }
4410
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004411 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004412 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004413 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004414 intel_dp_start_link_train(intel_dp);
4415 intel_dp_complete_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004416 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004417 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004418}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004419
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004420/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004421static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004422intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004423{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004424 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004425 uint8_t type;
4426
4427 if (!intel_dp_get_dpcd(intel_dp))
4428 return connector_status_disconnected;
4429
4430 /* if there's no downstream port, we're done */
4431 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004432 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004433
4434 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004435 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4436 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004437 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004438
4439 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4440 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004441 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004442
Adam Jackson23235172012-09-20 16:42:45 -04004443 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4444 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004445 }
4446
4447 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004448 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004449 return connector_status_connected;
4450
4451 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004452 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4453 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4454 if (type == DP_DS_PORT_TYPE_VGA ||
4455 type == DP_DS_PORT_TYPE_NON_EDID)
4456 return connector_status_unknown;
4457 } else {
4458 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4459 DP_DWN_STRM_PORT_TYPE_MASK;
4460 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4461 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4462 return connector_status_unknown;
4463 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004464
4465 /* Anything else is out of spec, warn and ignore */
4466 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004467 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004468}
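/*
 * Illustrative, standalone sketch (not part of i915): the two DPCD fields
 * intel_dp_detect_dpcd() above leans on for its verdict.  Assumptions for
 * the example: bit 0 of DOWNSTREAMPORT_PRESENT (DPCD 0x005) flags a
 * branch/downstream port, and the low 6 bits of SINK_COUNT (DPCD 0x200)
 * carry the number of attached sinks.  "ex_" names are invented, and the
 * HPD-awareness / DDC-probe fallbacks of the real function are left out.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_DWN_STRM_PORT_PRESENT	(1 << 0)
#define EX_SINK_COUNT_MASK		0x3f

static bool ex_sink_connected(uint8_t downstreamport_present, uint8_t sink_count)
{
	if (!(downstreamport_present & EX_DWN_STRM_PORT_PRESENT))
		return true;		/* native DP sink, nothing further to ask */

	/* branch device: somebody must actually be plugged in behind it */
	return (sink_count & EX_SINK_COUNT_MASK) != 0;
}

int main(void)
{
	printf("branch with 0 sinks: %s\n",
	       ex_sink_connected(0x01, 0x00) ? "connected" : "disconnected");
	printf("branch with 1 sink:  %s\n",
	       ex_sink_connected(0x01, 0x01) ? "connected" : "disconnected");
	return 0;
}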
4469
4470static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004471edp_detect(struct intel_dp *intel_dp)
4472{
4473 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4474 enum drm_connector_status status;
4475
4476 status = intel_panel_detect(dev);
4477 if (status == connector_status_unknown)
4478 status = connector_status_connected;
4479
4480 return status;
4481}
4482
4483static enum drm_connector_status
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004484ironlake_dp_detect(struct intel_dp *intel_dp)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004485{
Paulo Zanoni30add222012-10-26 19:05:45 -02004486 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Damien Lespiau1b469632012-12-13 16:09:01 +00004487 struct drm_i915_private *dev_priv = dev->dev_private;
4488 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004489
Damien Lespiau1b469632012-12-13 16:09:01 +00004490 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4491 return connector_status_disconnected;
4492
Keith Packard26d61aa2011-07-25 20:01:09 -07004493 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004494}
4495
Dave Airlie2a592be2014-09-01 16:58:12 +10004496static int g4x_digital_port_connected(struct drm_device *dev,
4497 struct intel_digital_port *intel_dig_port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004498{
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004499 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson10f76a32012-05-11 18:01:32 +01004500 uint32_t bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004501
Todd Previte232a6ee2014-01-23 00:13:41 -07004502 if (IS_VALLEYVIEW(dev)) {
4503 switch (intel_dig_port->port) {
4504 case PORT_B:
4505 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4506 break;
4507 case PORT_C:
4508 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4509 break;
4510 case PORT_D:
4511 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4512 break;
4513 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004514 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004515 }
4516 } else {
4517 switch (intel_dig_port->port) {
4518 case PORT_B:
4519 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4520 break;
4521 case PORT_C:
4522 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4523 break;
4524 case PORT_D:
4525 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4526 break;
4527 default:
Dave Airlie2a592be2014-09-01 16:58:12 +10004528 return -EINVAL;
Todd Previte232a6ee2014-01-23 00:13:41 -07004529 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004530 }
4531
Chris Wilson10f76a32012-05-11 18:01:32 +01004532 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
Dave Airlie2a592be2014-09-01 16:58:12 +10004533 return 0;
4534 return 1;
4535}
4536
4537static enum drm_connector_status
4538g4x_dp_detect(struct intel_dp *intel_dp)
4539{
4540 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4542 int ret;
4543
4544 /* Can't disconnect eDP, but you can close the lid... */
4545 if (is_edp(intel_dp)) {
4546 enum drm_connector_status status;
4547
4548 status = intel_panel_detect(dev);
4549 if (status == connector_status_unknown)
4550 status = connector_status_connected;
4551 return status;
4552 }
4553
4554 ret = g4x_digital_port_connected(dev, intel_dig_port);
4555 if (ret == -EINVAL)
4556 return connector_status_unknown;
4557 else if (ret == 0)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004558 return connector_status_disconnected;
4559
Keith Packard26d61aa2011-07-25 20:01:09 -07004560 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004561}
4562
Keith Packard8c241fe2011-09-28 16:38:44 -07004563static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004564intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004565{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004566 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004567
Jani Nikula9cd300e2012-10-19 14:51:52 +03004568 /* use cached edid if we have one */
4569 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004570 /* invalid edid */
4571 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004572 return NULL;
4573
Jani Nikula55e9ede2013-10-01 10:38:54 +03004574 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004575 } else
4576 return drm_get_edid(&intel_connector->base,
4577 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004578}
4579
Chris Wilsonbeb60602014-09-02 20:04:00 +01004580static void
4581intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004582{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004583 struct intel_connector *intel_connector = intel_dp->attached_connector;
4584 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004585
Chris Wilsonbeb60602014-09-02 20:04:00 +01004586 edid = intel_dp_get_edid(intel_dp);
4587 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004588
Chris Wilsonbeb60602014-09-02 20:04:00 +01004589 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4590 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4591 else
4592 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4593}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004594
Chris Wilsonbeb60602014-09-02 20:04:00 +01004595static void
4596intel_dp_unset_edid(struct intel_dp *intel_dp)
4597{
4598 struct intel_connector *intel_connector = intel_dp->attached_connector;
4599
4600 kfree(intel_connector->detect_edid);
4601 intel_connector->detect_edid = NULL;
4602
4603 intel_dp->has_audio = false;
4604}
4605
4606static enum intel_display_power_domain
4607intel_dp_power_get(struct intel_dp *dp)
4608{
4609 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4610 enum intel_display_power_domain power_domain;
4611
4612 power_domain = intel_display_port_power_domain(encoder);
4613 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4614
4615 return power_domain;
4616}
4617
4618static void
4619intel_dp_power_put(struct intel_dp *dp,
4620 enum intel_display_power_domain power_domain)
4621{
4622 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4623 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004624}
4625
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004626static enum drm_connector_status
4627intel_dp_detect(struct drm_connector *connector, bool force)
4628{
4629 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004630 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4631 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004632 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004633 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004634 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004635 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004636 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004637
Chris Wilson164c8592013-07-20 20:27:08 +01004638 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004639 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004640 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004641
Dave Airlie0e32b392014-05-02 14:02:48 +10004642 if (intel_dp->is_mst) {
4643 /* MST devices are disconnected from a monitor POV */
4644 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4645 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004646 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004647 }
4648
Chris Wilsonbeb60602014-09-02 20:04:00 +01004649 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004650
Chris Wilsond410b562014-09-02 20:03:59 +01004651 /* Can't disconnect eDP, but you can close the lid... */
4652 if (is_edp(intel_dp))
4653 status = edp_detect(intel_dp);
4654 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004655 status = ironlake_dp_detect(intel_dp);
4656 else
4657 status = g4x_dp_detect(intel_dp);
4658 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004659 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004660
Adam Jackson0d198322012-05-14 16:05:47 -04004661 intel_dp_probe_oui(intel_dp);
4662
Dave Airlie0e32b392014-05-02 14:02:48 +10004663 ret = intel_dp_probe_mst(intel_dp);
4664 if (ret) {
4665		/* if we are in MST mode then this connector
4666		   won't appear connected and won't report any EDID */
4667 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4668 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4669 status = connector_status_disconnected;
4670 goto out;
4671 }
4672
Chris Wilsonbeb60602014-09-02 20:04:00 +01004673 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004674
Paulo Zanonid63885d2012-10-26 19:05:49 -02004675 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4676 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004677 status = connector_status_connected;
4678
Todd Previte09b1eb12015-04-20 15:27:34 -07004679 /* Try to read the source of the interrupt */
4680 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4681 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4682 /* Clear interrupt source */
4683 drm_dp_dpcd_writeb(&intel_dp->aux,
4684 DP_DEVICE_SERVICE_IRQ_VECTOR,
4685 sink_irq_vector);
4686
4687 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4688 intel_dp_handle_test_request(intel_dp);
4689 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4690 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4691 }
4692
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004693out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004694 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004695 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004696}
4697
Chris Wilsonbeb60602014-09-02 20:04:00 +01004698static void
4699intel_dp_force(struct drm_connector *connector)
4700{
4701 struct intel_dp *intel_dp = intel_attached_dp(connector);
4702 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4703 enum intel_display_power_domain power_domain;
4704
4705 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4706 connector->base.id, connector->name);
4707 intel_dp_unset_edid(intel_dp);
4708
4709 if (connector->status != connector_status_connected)
4710 return;
4711
4712 power_domain = intel_dp_power_get(intel_dp);
4713
4714 intel_dp_set_edid(intel_dp);
4715
4716 intel_dp_power_put(intel_dp, power_domain);
4717
4718 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4719 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4720}
4721
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004722static int intel_dp_get_modes(struct drm_connector *connector)
4723{
Jani Nikuladd06f902012-10-19 14:51:50 +03004724 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004725 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004726
Chris Wilsonbeb60602014-09-02 20:04:00 +01004727 edid = intel_connector->detect_edid;
4728 if (edid) {
4729 int ret = intel_connector_update_modes(connector, edid);
4730 if (ret)
4731 return ret;
4732 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004733
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004734 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004735 if (is_edp(intel_attached_dp(connector)) &&
4736 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004737 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004738
4739 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004740 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004741 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004742 drm_mode_probed_add(connector, mode);
4743 return 1;
4744 }
4745 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004746
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004747 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004748}
4749
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004750static bool
4751intel_dp_detect_audio(struct drm_connector *connector)
4752{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004753 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004754 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004755
Chris Wilsonbeb60602014-09-02 20:04:00 +01004756 edid = to_intel_connector(connector)->detect_edid;
4757 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004758 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004759
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004760 return has_audio;
4761}
4762
Chris Wilsonf6849602010-09-19 09:29:33 +01004763static int
4764intel_dp_set_property(struct drm_connector *connector,
4765 struct drm_property *property,
4766 uint64_t val)
4767{
Chris Wilsone953fd72011-02-21 22:23:52 +00004768 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004769 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004770 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4771 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004772 int ret;
4773
Rob Clark662595d2012-10-11 20:36:04 -05004774 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004775 if (ret)
4776 return ret;
4777
Chris Wilson3f43c482011-05-12 22:17:24 +01004778 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004779 int i = val;
4780 bool has_audio;
4781
4782 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004783 return 0;
4784
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004785 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004786
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004787 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004788 has_audio = intel_dp_detect_audio(connector);
4789 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004790 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004791
4792 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004793 return 0;
4794
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004795 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004796 goto done;
4797 }
4798
Chris Wilsone953fd72011-02-21 22:23:52 +00004799 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004800 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004801 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004802
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004803 switch (val) {
4804 case INTEL_BROADCAST_RGB_AUTO:
4805 intel_dp->color_range_auto = true;
4806 break;
4807 case INTEL_BROADCAST_RGB_FULL:
4808 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004809 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004810 break;
4811 case INTEL_BROADCAST_RGB_LIMITED:
4812 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004813 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004814 break;
4815 default:
4816 return -EINVAL;
4817 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004818
4819 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004820 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004821 return 0;
4822
Chris Wilsone953fd72011-02-21 22:23:52 +00004823 goto done;
4824 }
4825
Yuly Novikov53b41832012-10-26 12:04:00 +03004826 if (is_edp(intel_dp) &&
4827 property == connector->dev->mode_config.scaling_mode_property) {
4828 if (val == DRM_MODE_SCALE_NONE) {
4829 DRM_DEBUG_KMS("no scaling not supported\n");
4830 return -EINVAL;
4831 }
4832
4833 if (intel_connector->panel.fitting_mode == val) {
4834 /* the eDP scaling property is not changed */
4835 return 0;
4836 }
4837 intel_connector->panel.fitting_mode = val;
4838
4839 goto done;
4840 }
4841
Chris Wilsonf6849602010-09-19 09:29:33 +01004842 return -EINVAL;
4843
4844done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004845 if (intel_encoder->base.crtc)
4846 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004847
4848 return 0;
4849}
4850
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004851static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004852intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004853{
Jani Nikula1d508702012-10-19 14:51:49 +03004854 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004855
Chris Wilson10e972d2014-09-04 21:43:45 +01004856 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004857
Jani Nikula9cd300e2012-10-19 14:51:52 +03004858 if (!IS_ERR_OR_NULL(intel_connector->edid))
4859 kfree(intel_connector->edid);
4860
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004861 /* Can't call is_edp() since the encoder may have been destroyed
4862 * already. */
4863 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004864 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004865
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004866 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004867 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004868}
4869
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004870void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004871{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004872 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4873 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004874
Dave Airlie4f71d0c2014-06-04 16:02:28 +10004875 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10004876 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004877 if (is_edp(intel_dp)) {
4878 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004879 /*
4880		 * vdd might still be enabled due to the delayed vdd off.
4881 * Make sure vdd is actually turned off here.
4882 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004883 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004884 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004885 pps_unlock(intel_dp);
4886
Clint Taylor01527b32014-07-07 13:01:46 -07004887 if (intel_dp->edp_notifier.notifier_call) {
4888 unregister_reboot_notifier(&intel_dp->edp_notifier);
4889 intel_dp->edp_notifier.notifier_call = NULL;
4890 }
Keith Packardbd943152011-09-18 23:09:52 -07004891 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004892 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004893 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004894}
4895
Imre Deak07f9cd02014-08-18 14:42:45 +03004896static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4897{
4898 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4899
4900 if (!is_edp(intel_dp))
4901 return;
4902
Ville Syrjälä951468f2014-09-04 14:55:31 +03004903 /*
4904	 * vdd might still be enabled due to the delayed vdd off.
4905 * Make sure vdd is actually turned off here.
4906 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004907 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004908 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004909 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004910 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004911}
4912
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004913static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4914{
4915 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4916 struct drm_device *dev = intel_dig_port->base.base.dev;
4917 struct drm_i915_private *dev_priv = dev->dev_private;
4918 enum intel_display_power_domain power_domain;
4919
4920 lockdep_assert_held(&dev_priv->pps_mutex);
4921
4922 if (!edp_have_panel_vdd(intel_dp))
4923 return;
4924
4925 /*
4926 * The VDD bit needs a power domain reference, so if the bit is
4927 * already enabled when we boot or resume, grab this reference and
4928 * schedule a vdd off, so we don't hold on to the reference
4929 * indefinitely.
4930 */
4931 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4932 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4933 intel_display_power_get(dev_priv, power_domain);
4934
4935 edp_panel_vdd_schedule_off(intel_dp);
4936}
4937
Imre Deak6d93c0c2014-07-31 14:03:36 +03004938static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4939{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004940 struct intel_dp *intel_dp;
4941
4942 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4943 return;
4944
4945 intel_dp = enc_to_intel_dp(encoder);
4946
4947 pps_lock(intel_dp);
4948
4949 /*
4950 * Read out the current power sequencer assignment,
4951 * in case the BIOS did something with it.
4952 */
4953 if (IS_VALLEYVIEW(encoder->dev))
4954 vlv_initial_power_sequencer_setup(intel_dp);
4955
4956 intel_edp_panel_vdd_sanitize(intel_dp);
4957
4958 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004959}
4960
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004961static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004962 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004963 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004964 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004965 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004966 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004967 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004968 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004969 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004970 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004971};
4972
4973static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4974 .get_modes = intel_dp_get_modes,
4975 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004976 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004977};
4978
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004979static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004980 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004981 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004982};
4983
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004984enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004985intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4986{
4987 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004988 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004989 struct drm_device *dev = intel_dig_port->base.base.dev;
4990 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004991 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004992 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004993
Dave Airlie0e32b392014-05-02 14:02:48 +10004994 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4995 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004996
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004997 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4998 /*
4999 * vdd off can generate a long pulse on eDP which
5000 * would require vdd on to handle it, and thus we
5001 * would end up in an endless cycle of
5002 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5003 */
5004 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5005 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02005006 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03005007 }
5008
Ville Syrjälä26fbb772014-08-11 18:37:37 +03005009 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5010 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10005011 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10005012
Imre Deak1c767b32014-08-18 14:42:42 +03005013 power_domain = intel_display_port_power_domain(intel_encoder);
5014 intel_display_power_get(dev_priv, power_domain);
5015
Dave Airlie0e32b392014-05-02 14:02:48 +10005016 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03005017 /* indicate that we need to restart link training */
5018 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10005019
5020 if (HAS_PCH_SPLIT(dev)) {
5021 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
5022 goto mst_fail;
5023 } else {
5024 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
5025 goto mst_fail;
5026 }
Dave Airlie0e32b392014-05-02 14:02:48 +10005027
5028 if (!intel_dp_get_dpcd(intel_dp)) {
5029 goto mst_fail;
5030 }
5031
5032 intel_dp_probe_oui(intel_dp);
5033
5034 if (!intel_dp_probe_mst(intel_dp))
5035 goto mst_fail;
5036
5037 } else {
5038 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03005039 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10005040 goto mst_fail;
5041 }
5042
5043 if (!intel_dp->is_mst) {
5044 /*
5045 * we'll check the link status via the normal hot plug path later -
5046 * but for short hpds we should check it now
5047 */
Dave Airlie5b215bc2014-08-05 10:40:20 +10005048 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10005049 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10005050 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10005051 }
5052 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01005053
5054 ret = IRQ_HANDLED;
5055
Imre Deak1c767b32014-08-18 14:42:42 +03005056 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10005057mst_fail:
5058	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5059 if (intel_dp->is_mst) {
5060 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5061 intel_dp->is_mst = false;
5062 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5063 }
Imre Deak1c767b32014-08-18 14:42:42 +03005064put_power:
5065 intel_display_power_put(dev_priv, power_domain);
5066
5067 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10005068}
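/*
 * Illustrative sketch only (compiled out): the digital-port hotplug work is
 * expected to feed long/short pulses into intel_dp_hpd_pulse() roughly as
 * below. The dispatcher name and the way long_hpd is derived are assumptions
 * for illustration, not part of this file.
 */
#if 0
static void example_digport_dispatch(struct intel_digital_port *dig_port,
				     bool long_hpd)
{
	enum irqreturn ret;

	/* long_hpd was decided in the IRQ handler from the pulse duration */
	ret = intel_dp_hpd_pulse(dig_port, long_hpd);
	if (ret == IRQ_NONE)
		DRM_DEBUG_KMS("hpd pulse on port %c not handled\n",
			      port_name(dig_port->port));
}
#endif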
5069
Zhenyu Wange3421a12010-04-08 09:43:27 +08005070/* Return which DP Port should be selected for Transcoder DP control */
5071int
Akshay Joshi0206e352011-08-16 15:34:10 -04005072intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08005073{
5074 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005075 struct intel_encoder *intel_encoder;
5076 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005077
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005078 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5079 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01005080
Paulo Zanonifa90ece2012-10-26 19:05:44 -02005081 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5082 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01005083 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08005084 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01005085
Zhenyu Wange3421a12010-04-08 09:43:27 +08005086 return -1;
5087}
5088
Zhao Yakui36e83a12010-06-12 14:32:21 +08005089/* check the VBT to see whether the eDP is on DP-D port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005090bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005091{
5092 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005093 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005094 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005095 static const short port_mapping[] = {
5096 [PORT_B] = PORT_IDPB,
5097 [PORT_C] = PORT_IDPC,
5098 [PORT_D] = PORT_IDPD,
5099 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005100
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005101 if (port == PORT_A)
5102 return true;
5103
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005104 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005105 return false;
5106
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005107 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5108 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005109
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005110 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005111 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5112 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005113 return true;
5114 }
5115 return false;
5116}
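/*
 * Illustrative sketch only (compiled out): connector setup is expected to use
 * this VBT check to pick the connector type for a DP port. The helper below
 * is hypothetical; only intel_dp_is_edp() comes from this file.
 */
#if 0
static int example_dp_connector_type(struct drm_device *dev, enum port port)
{
	/* eDP panels register as eDP connectors, everything else as DP */
	return intel_dp_is_edp(dev, port) ? DRM_MODE_CONNECTOR_eDP :
		DRM_MODE_CONNECTOR_DisplayPort;
}
#endif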
5117
Dave Airlie0e32b392014-05-02 14:02:48 +10005118void
Chris Wilsonf6849602010-09-19 09:29:33 +01005119intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5120{
Yuly Novikov53b41832012-10-26 12:04:00 +03005121 struct intel_connector *intel_connector = to_intel_connector(connector);
5122
Chris Wilson3f43c482011-05-12 22:17:24 +01005123 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005124 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005125 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005126
5127 if (is_edp(intel_dp)) {
5128 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005129 drm_object_attach_property(
5130 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005131 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005132 DRM_MODE_SCALE_ASPECT);
5133 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005134 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005135}
5136
Imre Deakdada1a92014-01-29 13:25:41 +02005137static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5138{
5139 intel_dp->last_power_cycle = jiffies;
5140 intel_dp->last_power_on = jiffies;
5141 intel_dp->last_backlight_off = jiffies;
5142}
5143
Daniel Vetter67a54562012-10-20 20:57:45 +02005144static void
5145intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005146 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005147{
5148 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005149 struct edp_power_seq cur, vbt, spec,
5150 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305151 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5152 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005153
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005154 lockdep_assert_held(&dev_priv->pps_mutex);
5155
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005156 /* already initialized? */
5157 if (final->t11_t12 != 0)
5158 return;
5159
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305160 if (IS_BROXTON(dev)) {
5161 /*
5162 * TODO: BXT has 2 sets of PPS registers.
5163		 * The correct register for Broxton needs to be identified
5164		 * using VBT; hardcoding for now.
5165 */
5166 pp_ctrl_reg = BXT_PP_CONTROL(0);
5167 pp_on_reg = BXT_PP_ON_DELAYS(0);
5168 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5169 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005170 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005171 pp_on_reg = PCH_PP_ON_DELAYS;
5172 pp_off_reg = PCH_PP_OFF_DELAYS;
5173 pp_div_reg = PCH_PP_DIVISOR;
5174 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005175 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5176
5177 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5178 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5179 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5180 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005181 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005182
5183 /* Workaround: Need to write PP_CONTROL with the unlock key as
5184 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305185 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005186
Jesse Barnes453c5422013-03-28 09:55:41 -07005187 pp_on = I915_READ(pp_on_reg);
5188 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305189 if (!IS_BROXTON(dev)) {
5190 I915_WRITE(pp_ctrl_reg, pp_ctl);
5191 pp_div = I915_READ(pp_div_reg);
5192 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005193
5194 /* Pull timing values out of registers */
5195 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5196 PANEL_POWER_UP_DELAY_SHIFT;
5197
5198 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5199 PANEL_LIGHT_ON_DELAY_SHIFT;
5200
5201 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5202 PANEL_LIGHT_OFF_DELAY_SHIFT;
5203
5204 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5205 PANEL_POWER_DOWN_DELAY_SHIFT;
5206
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305207 if (IS_BROXTON(dev)) {
5208 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5209 BXT_POWER_CYCLE_DELAY_SHIFT;
5210 if (tmp > 0)
5211 cur.t11_t12 = (tmp - 1) * 1000;
5212 else
5213 cur.t11_t12 = 0;
5214 } else {
5215 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005216 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305217 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005218
5219 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5220 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5221
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005222 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005223
5224 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5225 * our hw here, which are all in 100usec. */
5226 spec.t1_t3 = 210 * 10;
5227 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5228 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5229 spec.t10 = 500 * 10;
5230 /* This one is special and actually in units of 100ms, but zero
5231 * based in the hw (so we need to add 100 ms). But the sw vbt
5232	 * table multiplies it by 1000 to make it in units of 100usec,
5233 * too. */
5234 spec.t11_t12 = (510 + 100) * 10;
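	/*
	 * Worked example of the unit juggling (illustrative numbers only):
	 * on the non-Broxton read-back path a power-cycle field of 5 becomes
	 * cur.t11_t12 = 5 * 1000 = 5000 in these 100 us units, i.e. 500 ms,
	 * and the spec cap above, (510 + 100) * 10 = 6100, is 610 ms once
	 * get_delay() below divides by 10.
	 */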
5235
5236 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5237 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5238
5239 /* Use the max of the register settings and vbt. If both are
5240 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005241#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005242 spec.field : \
5243 max(cur.field, vbt.field))
5244 assign_final(t1_t3);
5245 assign_final(t8);
5246 assign_final(t9);
5247 assign_final(t10);
5248 assign_final(t11_t12);
5249#undef assign_final
5250
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005251#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005252 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5253 intel_dp->backlight_on_delay = get_delay(t8);
5254 intel_dp->backlight_off_delay = get_delay(t9);
5255 intel_dp->panel_power_down_delay = get_delay(t10);
5256 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5257#undef get_delay
5258
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005259 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5260 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5261 intel_dp->panel_power_cycle_delay);
5262
5263 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5264 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005265}
5266
5267static void
5268intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005269 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005270{
5271 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005272 u32 pp_on, pp_off, pp_div, port_sel = 0;
5273 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305274 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005275 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005276 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005277
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005278 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005279
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305280 if (IS_BROXTON(dev)) {
5281 /*
5282 * TODO: BXT has 2 sets of PPS registers.
5283		 * The correct register for Broxton needs to be identified
5284		 * using VBT; hardcoding for now.
5285 */
5286 pp_ctrl_reg = BXT_PP_CONTROL(0);
5287 pp_on_reg = BXT_PP_ON_DELAYS(0);
5288 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5289
5290 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005291 pp_on_reg = PCH_PP_ON_DELAYS;
5292 pp_off_reg = PCH_PP_OFF_DELAYS;
5293 pp_div_reg = PCH_PP_DIVISOR;
5294 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005295 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5296
5297 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5298 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5299 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005300 }
5301
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005302 /*
5303 * And finally store the new values in the power sequencer. The
5304 * backlight delays are set to 1 because we do manual waits on them. For
5305 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5306 * we'll end up waiting for the backlight off delay twice: once when we
5307 * do the manual sleep, and once when we disable the panel and wait for
5308 * the PP_STATUS bit to become zero.
5309 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005310 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005311 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5312 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005313 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005314 /* Compute the divisor for the pp clock, simply match the Bspec
5315 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305316 if (IS_BROXTON(dev)) {
5317 pp_div = I915_READ(pp_ctrl_reg);
5318 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5319 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5320 << BXT_POWER_CYCLE_DELAY_SHIFT);
5321 } else {
5322 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5323 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5324 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5325 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005326
5327 /* Haswell doesn't have any port selection bits for the panel
5328 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005329 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005330 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005331 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005332 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005333 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005334 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005335 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005336 }
5337
Jesse Barnes453c5422013-03-28 09:55:41 -07005338 pp_on |= port_sel;
5339
5340 I915_WRITE(pp_on_reg, pp_on);
5341 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305342 if (IS_BROXTON(dev))
5343 I915_WRITE(pp_ctrl_reg, pp_div);
5344 else
5345 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005346
Daniel Vetter67a54562012-10-20 20:57:45 +02005347 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005348 I915_READ(pp_on_reg),
5349 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305350 IS_BROXTON(dev) ?
5351 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005352 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005353}
5354
Vandana Kannanb33a2812015-02-13 15:33:03 +05305355/**
5356 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5357 * @dev: DRM device
5358 * @refresh_rate: RR to be programmed
5359 *
5360 * This function gets called when refresh rate (RR) has to be changed from
5361 * one frequency to another. Switches can be between high and low RR
5362 * supported by the panel or to any other RR based on media playback (in
5363 * this case, RR value needs to be passed from user space).
5364 *
5365 * The caller of this function needs to hold dev_priv->drrs.mutex.
5366 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305367static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305368{
5369 struct drm_i915_private *dev_priv = dev->dev_private;
5370 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305371 struct intel_digital_port *dig_port = NULL;
5372 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005373 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305374 struct intel_crtc *intel_crtc = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305375 u32 reg, val;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305376 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305377
5378 if (refresh_rate <= 0) {
5379 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5380 return;
5381 }
5382
Vandana Kannan96178ee2015-01-10 02:25:56 +05305383 if (intel_dp == NULL) {
5384 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305385 return;
5386 }
5387
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005388 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005389 * FIXME: This needs proper synchronization with psr state for some
5390 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005391 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305392
Vandana Kannan96178ee2015-01-10 02:25:56 +05305393 dig_port = dp_to_dig_port(intel_dp);
5394 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005395 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305396
5397 if (!intel_crtc) {
5398 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5399 return;
5400 }
5401
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005402 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305403
Vandana Kannan96178ee2015-01-10 02:25:56 +05305404 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305405 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5406 return;
5407 }
5408
Vandana Kannan96178ee2015-01-10 02:25:56 +05305409 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5410 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305411 index = DRRS_LOW_RR;
5412
Vandana Kannan96178ee2015-01-10 02:25:56 +05305413 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305414 DRM_DEBUG_KMS(
5415 "DRRS requested for previously set RR...ignoring\n");
5416 return;
5417 }
5418
5419 if (!intel_crtc->active) {
5420 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5421 return;
5422 }
5423
Durgadoss R44395bf2015-02-13 15:33:02 +05305424 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305425 switch (index) {
5426 case DRRS_HIGH_RR:
5427 intel_dp_set_m_n(intel_crtc, M1_N1);
5428 break;
5429 case DRRS_LOW_RR:
5430 intel_dp_set_m_n(intel_crtc, M2_N2);
5431 break;
5432 case DRRS_MAX_RR:
5433 default:
5434 DRM_ERROR("Unsupported refreshrate type\n");
5435 }
5436 } else if (INTEL_INFO(dev)->gen > 6) {
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005437 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305438 val = I915_READ(reg);
Vandana Kannana4c30b12015-02-13 15:33:00 +05305439
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305440 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305441 if (IS_VALLEYVIEW(dev))
5442 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5443 else
5444 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305445 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305446 if (IS_VALLEYVIEW(dev))
5447 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5448 else
5449 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305450 }
5451 I915_WRITE(reg, val);
5452 }
5453
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305454 dev_priv->drrs.refresh_rate_type = index;
5455
5456 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5457}
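/*
 * Illustrative usage sketch only (compiled out): callers must hold
 * dev_priv->drrs.mutex around intel_dp_set_drrs_state(), as the DRRS
 * enable/disable/worker paths below do. The helper name is hypothetical.
 */
#if 0
static void example_force_low_rr(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp)
		intel_dp_set_drrs_state(dev_priv->dev,
					dev_priv->drrs.dp->attached_connector->panel.
					downclock_mode->vrefresh);
	mutex_unlock(&dev_priv->drrs.mutex);
}
#endif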
5458
Vandana Kannanb33a2812015-02-13 15:33:03 +05305459/**
5460 * intel_edp_drrs_enable - init drrs struct if supported
5461 * @intel_dp: DP struct
5462 *
5463 * Initializes frontbuffer_bits and drrs.dp
5464 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305465void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5466{
5467 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5468 struct drm_i915_private *dev_priv = dev->dev_private;
5469 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5470 struct drm_crtc *crtc = dig_port->base.base.crtc;
5471 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5472
5473 if (!intel_crtc->config->has_drrs) {
5474 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5475 return;
5476 }
5477
5478 mutex_lock(&dev_priv->drrs.mutex);
5479 if (WARN_ON(dev_priv->drrs.dp)) {
5480 DRM_ERROR("DRRS already enabled\n");
5481 goto unlock;
5482 }
5483
5484 dev_priv->drrs.busy_frontbuffer_bits = 0;
5485
5486 dev_priv->drrs.dp = intel_dp;
5487
5488unlock:
5489 mutex_unlock(&dev_priv->drrs.mutex);
5490}
5491
Vandana Kannanb33a2812015-02-13 15:33:03 +05305492/**
5493 * intel_edp_drrs_disable - Disable DRRS
5494 * @intel_dp: DP struct
5495 *
5496 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305497void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5498{
5499 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5500 struct drm_i915_private *dev_priv = dev->dev_private;
5501 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5502 struct drm_crtc *crtc = dig_port->base.base.crtc;
5503 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5504
5505 if (!intel_crtc->config->has_drrs)
5506 return;
5507
5508 mutex_lock(&dev_priv->drrs.mutex);
5509 if (!dev_priv->drrs.dp) {
5510 mutex_unlock(&dev_priv->drrs.mutex);
5511 return;
5512 }
5513
5514 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5515 intel_dp_set_drrs_state(dev_priv->dev,
5516 intel_dp->attached_connector->panel.
5517 fixed_mode->vrefresh);
5518
5519 dev_priv->drrs.dp = NULL;
5520 mutex_unlock(&dev_priv->drrs.mutex);
5521
5522 cancel_delayed_work_sync(&dev_priv->drrs.work);
5523}
5524
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305525static void intel_edp_drrs_downclock_work(struct work_struct *work)
5526{
5527 struct drm_i915_private *dev_priv =
5528 container_of(work, typeof(*dev_priv), drrs.work.work);
5529 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305530
Vandana Kannan96178ee2015-01-10 02:25:56 +05305531 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305532
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305533 intel_dp = dev_priv->drrs.dp;
5534
5535 if (!intel_dp)
5536 goto unlock;
5537
5538 /*
5539 * The delayed work can race with an invalidate hence we need to
5540 * recheck.
5541 */
5542
5543 if (dev_priv->drrs.busy_frontbuffer_bits)
5544 goto unlock;
5545
5546 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5547 intel_dp_set_drrs_state(dev_priv->dev,
5548 intel_dp->attached_connector->panel.
5549 downclock_mode->vrefresh);
5550
5551unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305552 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305553}
5554
Vandana Kannanb33a2812015-02-13 15:33:03 +05305555/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305556 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305557 * @dev: DRM device
5558 * @frontbuffer_bits: frontbuffer plane tracking bits
5559 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305560 * This function gets called every time rendering on the given planes starts.
5561 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305562 *
5563 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5564 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305565void intel_edp_drrs_invalidate(struct drm_device *dev,
5566 unsigned frontbuffer_bits)
5567{
5568 struct drm_i915_private *dev_priv = dev->dev_private;
5569 struct drm_crtc *crtc;
5570 enum pipe pipe;
5571
Daniel Vetter9da7d692015-04-09 16:44:15 +02005572 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305573 return;
5574
Daniel Vetter88f933a2015-04-09 16:44:16 +02005575 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305576
Vandana Kannana93fad02015-01-10 02:25:59 +05305577 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005578 if (!dev_priv->drrs.dp) {
5579 mutex_unlock(&dev_priv->drrs.mutex);
5580 return;
5581 }
5582
Vandana Kannana93fad02015-01-10 02:25:59 +05305583 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5584 pipe = to_intel_crtc(crtc)->pipe;
5585
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005586 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5587 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5588
Ramalingam C0ddfd202015-06-15 20:50:05 +05305589 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005590 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305591 intel_dp_set_drrs_state(dev_priv->dev,
5592 dev_priv->drrs.dp->attached_connector->panel.
5593 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305594
Vandana Kannana93fad02015-01-10 02:25:59 +05305595 mutex_unlock(&dev_priv->drrs.mutex);
5596}
5597
Vandana Kannanb33a2812015-02-13 15:33:03 +05305598/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305599 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305600 * @dev: DRM device
5601 * @frontbuffer_bits: frontbuffer plane tracking bits
5602 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305603 * This function gets called every time rendering on the given planes has
5604 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5605 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5606 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305607 *
5608 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5609 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305610void intel_edp_drrs_flush(struct drm_device *dev,
5611 unsigned frontbuffer_bits)
5612{
5613 struct drm_i915_private *dev_priv = dev->dev_private;
5614 struct drm_crtc *crtc;
5615 enum pipe pipe;
5616
Daniel Vetter9da7d692015-04-09 16:44:15 +02005617 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305618 return;
5619
Daniel Vetter88f933a2015-04-09 16:44:16 +02005620 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305621
Vandana Kannana93fad02015-01-10 02:25:59 +05305622 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005623 if (!dev_priv->drrs.dp) {
5624 mutex_unlock(&dev_priv->drrs.mutex);
5625 return;
5626 }
5627
Vandana Kannana93fad02015-01-10 02:25:59 +05305628 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5629 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005630
5631 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305632 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5633
Ramalingam C0ddfd202015-06-15 20:50:05 +05305634 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005635 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305636 intel_dp_set_drrs_state(dev_priv->dev,
5637 dev_priv->drrs.dp->attached_connector->panel.
5638 fixed_mode->vrefresh);
5639
5640 /*
5641 * flush also means no more activity hence schedule downclock, if all
5642 * other fbs are quiescent too
5643 */
5644 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305645 schedule_delayed_work(&dev_priv->drrs.work,
5646 msecs_to_jiffies(1000));
5647 mutex_unlock(&dev_priv->drrs.mutex);
5648}
5649
Vandana Kannanb33a2812015-02-13 15:33:03 +05305650/**
5651 * DOC: Display Refresh Rate Switching (DRRS)
5652 *
5653 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5654 * which enables switching between low and high refresh rates,
5655 * dynamically, based on the usage scenario. This feature is applicable
5656 * for internal panels.
5657 *
5658 * Indication that the panel supports DRRS is given by the panel EDID, which
5659 * would list multiple refresh rates for one resolution.
5660 *
5661 * DRRS is of 2 types - static and seamless.
5662 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5663 * (may appear as a blink on screen) and is used in the dock/undock scenario.
5664 * Seamless DRRS involves changing RR without any visual effect to the user
5665 * and can be used during normal system usage. This is done by programming
5666 * certain registers.
5667 *
5668 * Support for static/seamless DRRS may be indicated in the VBT based on
5669 * inputs from the panel spec.
5670 *
5671 * DRRS saves power by switching to low RR based on usage scenarios.
5672 *
5673 * eDP DRRS:-
5674 * The implementation is based on frontbuffer tracking implementation.
5675 * When there is a disturbance on the screen triggered by user activity or a
5676 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5677 * When there is no movement on screen, after a timeout of 1 second, a switch
5678 * to low RR is made.
5679 * For integration with frontbuffer tracking code,
5680 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5681 *
5682 * DRRS can be further extended to support other internal panels and also
5683 * the scenario of video playback wherein RR is set based on the rate
5684 * requested by userspace.
5685 */
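/*
 * Illustrative sketch only (compiled out): how frontbuffer tracking is
 * expected to drive Idleness DRRS around a rendering operation. The caller
 * below is hypothetical; only intel_edp_drrs_invalidate()/_flush() and their
 * (dev, frontbuffer_bits) signatures come from this file.
 */
#if 0
static void example_frontbuffer_render(struct drm_device *dev,
				       unsigned frontbuffer_bits)
{
	/* planes are about to be dirtied: switch to high RR immediately */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering / flip on the tracked planes happens here ... */

	/* done: stay at high RR now, downclock after ~1s of idleness */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
#endif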
5686
5687/**
5688 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5689 * @intel_connector: eDP connector
5690 * @fixed_mode: preferred mode of panel
5691 *
5692 * This function is called only once at driver load to initialize basic
5693 * DRRS stuff.
5694 *
5695 * Returns:
5696 * Downclock mode if panel supports it, else return NULL.
5697 * DRRS support is determined by the presence of downclock mode (apart
5698 * from VBT setting).
5699 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305700static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305701intel_dp_drrs_init(struct intel_connector *intel_connector,
5702 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305703{
5704 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305705 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305706 struct drm_i915_private *dev_priv = dev->dev_private;
5707 struct drm_display_mode *downclock_mode = NULL;
5708
Daniel Vetter9da7d692015-04-09 16:44:15 +02005709 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5710 mutex_init(&dev_priv->drrs.mutex);
5711
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305712 if (INTEL_INFO(dev)->gen <= 6) {
5713 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5714 return NULL;
5715 }
5716
5717 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005718 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305719 return NULL;
5720 }
5721
5722 downclock_mode = intel_find_panel_downclock
5723 (dev, fixed_mode, connector);
5724
5725 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305726 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305727 return NULL;
5728 }
5729
Vandana Kannan96178ee2015-01-10 02:25:56 +05305730 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305731
Vandana Kannan96178ee2015-01-10 02:25:56 +05305732 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005733 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305734 return downclock_mode;
5735}
5736
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005737static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005738 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005739{
5740 struct drm_connector *connector = &intel_connector->base;
5741 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005742 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5743 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005744 struct drm_i915_private *dev_priv = dev->dev_private;
5745 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305746 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005747 bool has_dpcd;
5748 struct drm_display_mode *scan;
5749 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005750 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005751
5752 if (!is_edp(intel_dp))
5753 return true;
5754
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005755 pps_lock(intel_dp);
5756 intel_edp_panel_vdd_sanitize(intel_dp);
5757 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005758
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005759 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005760 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005761
5762 if (has_dpcd) {
5763 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5764 dev_priv->no_aux_handshake =
5765 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5766 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5767 } else {
5768 /* if this fails, presume the device is a ghost */
5769 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005770 return false;
5771 }
5772
5773 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005774 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005775 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005776 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005777
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

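	/* Register the fixed mode, optional downclock mode and backlight. */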
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}

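/*
 * Initialize the connector for a DP/eDP digital port. Returns false on
 * failure (e.g. if the eDP panel cannot be probed), in which case the
 * caller is expected to clean up the encoder.
 */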
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

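	/*
	 * For eDP, set up the panel power sequencer before the first AUX
	 * transaction: talking to the panel needs VDD/panel power, which is
	 * controlled through the PPS.
	 */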
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

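	/*
	 * Probe the eDP panel (DPCD, EDID, fixed mode). On failure, undo the
	 * connector setup and report the error to the caller.
	 */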
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/*
	 * For G4X desktop chips, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}

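/*
 * Entry point used by the output setup code to register a DP (or eDP) port.
 * A call site typically looks like the following (illustrative only; the
 * register/port pair depends on the platform):
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 */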
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
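	/*
	 * The enable/disable sequence differs per platform, mainly in how
	 * the DPLL/PHY is programmed around enabling the port.
	 */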
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
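	/*
	 * On CHV, port D can only be driven by pipe C and ports B/C by
	 * pipes A/B; other platforms allow any pipe.
	 */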
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

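	/* Long/short HPD pulses on this port are handled by intel_dp_hpd_pulse(). */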
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}

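/* Suspend the MST topology managers of all ports that have MST enabled. */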
void intel_dp_mst_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* disable MST */
	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];

		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			if (!intel_dig_port->dp.can_mst)
				continue;
			if (intel_dig_port->dp.is_mst)
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
		}
	}
}

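/*
 * Resume the MST topology managers; if a manager fails to resume, fall back
 * to re-checking the port's MST status.
 */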
void intel_dp_mst_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_MAX_PORTS; i++) {
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];

		if (!intel_dig_port)
			continue;

		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			int ret;

			if (!intel_dig_port->dp.can_mst)
				continue;

			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
			if (ret != 0)
				intel_dp_check_mst_status(&intel_dig_port->dp);
		}
	}
}