/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

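/* DPLL settings to use for a given DP link (symbol) clock; clock is in kHz. */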
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are listed below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
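	/*
	 * Worked example (values from the 162000 entry below):
	 * m2_int = 32 and m2_fraction = 1677722, so
	 * m2 = (32 << 22) | 1677722 = 0x819999a.
	 */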
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

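/*
 * Example: lane_count == 2 gives ~((1 << 2) - 1) & 0xf == 0xc, i.e. lanes
 * 2 and 3 of a 4-lane capable port are reported as unused.
 */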
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

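/*
 * The source side supports up to 4 lanes; DDI port A without DDI_A_4_LANES
 * is limited to 2. The sink's limit comes from its DPCD.
 */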
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
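/*
 * Tying the example together: 1680x1050R at 18bpp needs
 * intel_dp_link_required(119000, 18) == 214200 decakilobits, which fits
 * in the intel_dp_max_data_rate(270000, 1) == 216000 available on one
 * 2.7GHz lane.
 */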

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

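/* Pack up to 4 bytes, most significant byte first, into one 32-bit AUX data word. */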
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

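/* Unpack a 32-bit AUX data word into up to 4 bytes, most significant byte first. */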
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   Only applicable when the panel PM state is not otherwise tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		u32 pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and we want it to run
	 * at 2MHz. So, take the hrawclk value, divide by 2, and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
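/*
 * The AUX header built in txbuf[] below is 4 bits of request plus 20 bits
 * of address (3 bytes total), followed by one length byte - hence the two
 * sizes above.
 */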
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

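	/*
	 * Assuming the standard DPCD encodings DP_LINK_BW_1_62/2_7/5_4 ==
	 * 0x06/0x0a/0x14, (max_link_bw >> 3) + 1 yields how many entries of
	 * default_rates[] the sink supports.
	 */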
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001192bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301193{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001194 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1195 struct drm_device *dev = dig_port->base.base.dev;
1196
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301197 /* WaDisableHBR2:skl */
Jani Nikulae87a0052015-10-20 15:22:02 +03001198 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301199 return false;
1200
1201 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1202 (INTEL_INFO(dev)->gen >= 9))
1203 return true;
1204 else
1205 return false;
1206}
1207
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301208static int
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001209intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301210{
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001211 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1212 struct drm_device *dev = dig_port->base.base.dev;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301213 int size;
1214
Sonika Jindal64987fc2015-05-26 17:50:13 +05301215 if (IS_BROXTON(dev)) {
1216 *source_rates = bxt_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301217 size = ARRAY_SIZE(bxt_rates);
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001218 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
Sonika Jindal637a9c62015-05-07 09:52:08 +05301219 *source_rates = skl_rates;
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301220 size = ARRAY_SIZE(skl_rates);
1221 } else {
1222 *source_rates = default_rates;
1223 size = ARRAY_SIZE(default_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301224 }
Ville Syrjälä636280b2015-03-12 17:10:29 +02001225
Thulasimani,Sivakumared63baa2015-08-18 15:30:37 +05301226	/* This depends on the fact that 5.4 is the last value in the array */
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001227 if (!intel_dp_source_supports_hbr2(intel_dp))
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301228 size--;
Ville Syrjälä636280b2015-03-12 17:10:29 +02001229
Thulasimani,Sivakumaraf7080f2015-08-18 11:07:59 +05301230 return size;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301231}
1232
Daniel Vetter0e503382014-07-04 11:26:04 -03001233static void
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001234intel_dp_set_clock(struct intel_encoder *encoder,
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001235 struct intel_crtc_state *pipe_config)
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001236{
1237 struct drm_device *dev = encoder->base.dev;
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001238 const struct dp_link_dpll *divisor = NULL;
1239 int i, count = 0;
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001240
1241 if (IS_G4X(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001242 divisor = gen4_dpll;
1243 count = ARRAY_SIZE(gen4_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001244 } else if (HAS_PCH_SPLIT(dev)) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001245 divisor = pch_dpll;
1246 count = ARRAY_SIZE(pch_dpll);
Chon Ming Leeef9348c2014-04-09 13:28:18 +03001247 } else if (IS_CHERRYVIEW(dev)) {
1248 divisor = chv_dpll;
1249 count = ARRAY_SIZE(chv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001250 } else if (IS_VALLEYVIEW(dev)) {
Chon Ming Lee65ce4bf2013-09-04 01:30:38 +08001251 divisor = vlv_dpll;
1252 count = ARRAY_SIZE(vlv_dpll);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001253 }
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001254
1255 if (divisor && count) {
1256 for (i = 0; i < count; i++) {
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001257 if (pipe_config->port_clock == divisor[i].clock) {
Chon Ming Lee9dd4ffd2013-09-04 01:30:37 +08001258 pipe_config->dpll = divisor[i].dpll;
1259 pipe_config->clock_set = true;
1260 break;
1261 }
1262 }
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001263 }
1264}
1265
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001266static int intersect_rates(const int *source_rates, int source_len,
1267 const int *sink_rates, int sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001268 int *common_rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301269{
1270 int i = 0, j = 0, k = 0;
1271
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301272 while (i < source_len && j < sink_len) {
1273 if (source_rates[i] == sink_rates[j]) {
Ville Syrjäläe6bda3e2015-03-12 17:10:37 +02001274 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1275 return k;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001276 common_rates[k] = source_rates[i];
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301277 ++k;
1278 ++i;
1279 ++j;
1280 } else if (source_rates[i] < sink_rates[j]) {
1281 ++i;
1282 } else {
1283 ++j;
1284 }
1285 }
1286 return k;
1287}
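intersect_rates() is the classic two-pointer intersection of two ascending arrays: advance whichever side currently holds the smaller value and record a rate whenever both sides agree, so the result comes out sorted as well. A minimal standalone sketch of the same walk (the sink list below is made up for illustration):

/* Standalone illustration of the two-pointer intersection used above. */
#include <stdio.h>

#define SKETCH_MAX_RATES 8

static int sketch_intersect(const int *a, int a_len,
			    const int *b, int b_len, int *out)
{
	int i = 0, j = 0, k = 0;

	while (i < a_len && j < b_len) {
		if (a[i] == b[j]) {
			if (k >= SKETCH_MAX_RATES)
				break;
			out[k++] = a[i];
			i++;
			j++;
		} else if (a[i] < b[j]) {
			i++;
		} else {
			j++;
		}
	}
	return k;
}

int main(void)
{
	const int source[] = { 162000, 270000, 540000 };
	const int sink[] = { 162000, 216000, 270000, 432000, 540000 };
	int common[SKETCH_MAX_RATES];
	int i, n = sketch_intersect(source, 3, sink, 5, common);

	for (i = 0; i < n; i++)
		printf("common[%d] = %d\n", i, common[i]);
	return 0;
}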
1288
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001289static int intel_dp_common_rates(struct intel_dp *intel_dp,
1290 int *common_rates)
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001291{
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001292 const int *source_rates, *sink_rates;
1293 int source_len, sink_len;
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001296 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001297
1298 return intersect_rates(source_rates, source_len,
1299 sink_rates, sink_len,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001300 common_rates);
Ville Syrjälä2ecae762015-03-12 17:10:33 +02001301}
1302
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001303static void snprintf_int_array(char *str, size_t len,
1304 const int *array, int nelem)
1305{
1306 int i;
1307
1308 str[0] = '\0';
1309
1310 for (i = 0; i < nelem; i++) {
Jani Nikulab2f505b2015-05-18 16:01:45 +03001311 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001312 if (r >= len)
1313 return;
1314 str += r;
1315 len -= r;
1316 }
1317}
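The helper above leans on snprintf()'s return value, which is the number of characters that would have been written: once that meets or exceeds the remaining space the output was truncated and the loop stops. A tiny standalone sketch of the same append-and-check idiom:

/* Standalone illustration of the incremental snprintf() idiom above. */
#include <stdio.h>

int main(void)
{
	const int rates[] = { 162000, 270000, 540000 };
	char str[32];
	char *p = str;
	size_t left = sizeof(str);
	int i;

	str[0] = '\0';
	for (i = 0; i < 3; i++) {
		int r = snprintf(p, left, "%s%d", i ? ", " : "", rates[i]);

		if (r < 0 || (size_t)r >= left)
			break;		/* truncated, stop appending */
		p += r;
		left -= r;
	}
	printf("%s\n", str);		/* prints "162000, 270000, 540000" */
	return 0;
}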
1318
1319static void intel_dp_print_rates(struct intel_dp *intel_dp)
1320{
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001321 const int *source_rates, *sink_rates;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001322 int source_len, sink_len, common_len;
1323 int common_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001324 char str[128]; /* FIXME: too big for stack? */
1325
1326 if ((drm_debug & DRM_UT_KMS) == 0)
1327 return;
1328
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03001329 source_len = intel_dp_source_rates(intel_dp, &source_rates);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001330 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1331 DRM_DEBUG_KMS("source rates: %s\n", str);
1332
1333 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1334 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1335 DRM_DEBUG_KMS("sink rates: %s\n", str);
1336
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001337 common_len = intel_dp_common_rates(intel_dp, common_rates);
1338 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1339 DRM_DEBUG_KMS("common rates: %s\n", str);
Ville Syrjälä0336400e2015-03-12 17:10:39 +02001340}
1341
Ville Syrjäläf4896f12015-03-12 17:10:27 +02001342static int rate_to_index(int find, const int *rates)
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301343{
1344 int i = 0;
1345
1346 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1347 if (find == rates[i])
1348 break;
1349
1350 return i;
1351}
1352
Ville Syrjälä50fec212015-03-12 17:10:34 +02001353int
1354intel_dp_max_link_rate(struct intel_dp *intel_dp)
1355{
1356 int rates[DP_MAX_SUPPORTED_RATES] = {};
1357 int len;
1358
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001359 len = intel_dp_common_rates(intel_dp, rates);
Ville Syrjälä50fec212015-03-12 17:10:34 +02001360 if (WARN_ON(len <= 0))
1361 return 162000;
1362
1363 return rates[rate_to_index(0, rates) - 1];
1364}
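The rates[rate_to_index(0, rates) - 1] expression above works because rates[] is zero-initialised and the common rates are written in ascending order from index 0: searching for 0 lands on the first unused slot, i.e. the number of valid entries, and the entry just before it is the highest common rate. A standalone sketch, assuming DP_MAX_SUPPORTED_RATES is 8 as in the DRM headers:

/* Standalone illustration of the max-rate lookup above. */
#include <stdio.h>

#define SKETCH_MAX_SUPPORTED_RATES 8	/* assumed to match DP_MAX_SUPPORTED_RATES */

static int sketch_rate_to_index(int find, const int *rates)
{
	int i;

	for (i = 0; i < SKETCH_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;
	return i;
}

int main(void)
{
	/* zero-initialised, then filled in ascending order by the
	 * intersection step: three valid entries, the rest stay 0 */
	int rates[SKETCH_MAX_SUPPORTED_RATES] = { 162000, 270000, 540000 };
	int len = sketch_rate_to_index(0, rates);	/* first empty slot: 3 */

	printf("common rates: %d, max link rate: %d\n", len, rates[len - 1]);
	return 0;
}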
1365
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001366int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1367{
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001368 return rate_to_index(rate, intel_dp->sink_rates);
Ville Syrjäläed4e9c12015-03-12 17:10:36 +02001369}
1370
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03001371void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1372 uint8_t *link_bw, uint8_t *rate_select)
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001373{
1374 if (intel_dp->num_sink_rates) {
1375 *link_bw = 0;
1376 *rate_select =
1377 intel_dp_rate_select(intel_dp, port_clock);
1378 } else {
1379 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1380 *rate_select = 0;
1381 }
1382}
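Sinks that publish a DPCD rate table are driven by an index into that table (rate_select) with link_bw left at zero, while legacy sinks get the classic bandwidth code from drm_dp_link_rate_to_bw_code(). A rough standalone sketch of the two paths; the 0x06/0x0a/0x14 codes and the rate / 27000 shortcut are assumptions about that helper, not taken from this file:

/* Standalone sketch of the link_bw vs. rate_select split above. */
#include <stdio.h>

static void sketch_compute_rate(const int *sink_rates, int num_sink_rates,
				int port_clock,
				unsigned char *link_bw,
				unsigned char *rate_select)
{
	if (num_sink_rates) {
		int i;

		/* sink has a rate table: select by table index, no bw code */
		*link_bw = 0;
		*rate_select = 0;
		for (i = 0; i < num_sink_rates; i++)
			if (sink_rates[i] == port_clock)
				*rate_select = (unsigned char)i;
	} else {
		/* legacy sink: 162000/270000/540000 kHz -> 0x06/0x0a/0x14 */
		*link_bw = (unsigned char)(port_clock / 27000);
		*rate_select = 0;
	}
}

int main(void)
{
	const int sink_rates[] = { 162000, 216000, 270000, 540000 };
	unsigned char bw, sel;

	sketch_compute_rate(sink_rates, 4, 270000, &bw, &sel);
	printf("table sink:  link_bw=0x%02x rate_select=%u\n", bw, sel);

	sketch_compute_rate(NULL, 0, 270000, &bw, &sel);
	printf("legacy sink: link_bw=0x%02x rate_select=%u\n", bw, sel);
	return 0;
}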
1383
Paulo Zanoni00c09d72012-10-26 19:05:52 -02001384bool
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001385intel_dp_compute_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02001386 struct intel_crtc_state *pipe_config)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001387{
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001388 struct drm_device *dev = encoder->base.dev;
Daniel Vetter36008362013-03-27 00:44:59 +01001389 struct drm_i915_private *dev_priv = dev->dev_private;
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02001390 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001391 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001392 enum port port = dp_to_dig_port(intel_dp)->port;
Ander Conselvan de Oliveira84556d52015-03-20 16:18:10 +02001393 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
Jani Nikuladd06f902012-10-19 14:51:50 +03001394 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001395 int lane_count, clock;
Jani Nikula56071a22014-05-06 14:56:52 +03001396 int min_lane_count = 1;
Paulo Zanonieeb63242014-05-06 14:56:50 +03001397 int max_lane_count = intel_dp_max_lane_count(intel_dp);
Todd Previte06ea66b2014-01-20 10:19:39 -07001398 /* Conveniently, the link BW constants become indices with a shift...*/
Jani Nikula56071a22014-05-06 14:56:52 +03001399 int min_clock = 0;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301400 int max_clock;
Daniel Vetter083f9562012-04-20 20:23:49 +02001401 int bpp, mode_rate;
Daniel Vetterff9a6752013-06-01 17:16:21 +02001402 int link_avail, link_clock;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001403 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1404 int common_len;
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001405 uint8_t link_bw, rate_select;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301406
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001407 common_len = intel_dp_common_rates(intel_dp, common_rates);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301408
1409 /* No common link rates between source and sink */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001410 WARN_ON(common_len <= 0);
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301411
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001412 max_clock = common_len - 1;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001413
Imre Deakbc7d38a2013-05-16 14:40:36 +03001414 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01001415 pipe_config->has_pch_encoder = true;
1416
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001417 pipe_config->has_dp_encoder = true;
Vandana Kannanf769cd22014-08-05 07:51:22 -07001418 pipe_config->has_drrs = false;
Jani Nikula9fcb1702015-05-05 16:32:12 +03001419 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001420
Jani Nikuladd06f902012-10-19 14:51:50 +03001421 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1422 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1423 adjusted_mode);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001424
1425 if (INTEL_INFO(dev)->gen >= 9) {
1426 int ret;
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02001427 ret = skl_update_scaler_crtc(pipe_config);
Chandra Kondurua1b22782015-04-07 15:28:45 -07001428 if (ret)
 1429			return false;
1430 }
1431
Matt Roperb56676272015-11-04 09:05:27 -08001432 if (HAS_GMCH_DISPLAY(dev))
Jesse Barnes2dd24552013-04-25 12:55:01 -07001433 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1434 intel_connector->panel.fitting_mode);
1435 else
Jesse Barnesb074cec2013-04-25 12:55:02 -07001436 intel_pch_panel_fitting(intel_crtc, pipe_config,
1437 intel_connector->panel.fitting_mode);
Zhao Yakui0d3a1be2010-07-19 09:43:13 +01001438 }
1439
Daniel Vettercb1793c2012-06-04 18:39:21 +02001440 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
Daniel Vetter0af78a22012-05-23 11:30:55 +02001441 return false;
1442
Daniel Vetter083f9562012-04-20 20:23:49 +02001443 DRM_DEBUG_KMS("DP link computation with max lane count %i "
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301444 "max bw %d pixel clock %iKHz\n",
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001445 max_lane_count, common_rates[max_clock],
Damien Lespiau241bfc32013-09-25 16:45:37 +01001446 adjusted_mode->crtc_clock);
Daniel Vetter083f9562012-04-20 20:23:49 +02001447
Daniel Vetter36008362013-03-27 00:44:59 +01001448 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1449 * bpc in between. */
Daniel Vetter3e7ca982013-06-01 19:45:56 +02001450 bpp = pipe_config->pipe_bpp;
Jani Nikula56071a22014-05-06 14:56:52 +03001451 if (is_edp(intel_dp)) {
Thulasimani,Sivakumar22ce5622015-07-31 11:05:27 +05301452
 1453		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1454 if (intel_connector->base.display_info.bpc == 0 &&
1455 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
Jani Nikula56071a22014-05-06 14:56:52 +03001456 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1457 dev_priv->vbt.edp_bpp);
1458 bpp = dev_priv->vbt.edp_bpp;
1459 }
1460
Jani Nikula344c5bb2014-09-09 11:25:13 +03001461 /*
1462 * Use the maximum clock and number of lanes the eDP panel
 1463		 * advertises being capable of. The panels are generally
1464 * designed to support only a single clock and lane
1465 * configuration, and typically these values correspond to the
1466 * native resolution of the panel.
1467 */
1468 min_lane_count = max_lane_count;
1469 min_clock = max_clock;
Imre Deak79842112013-07-18 17:44:13 +03001470 }
Daniel Vetter657445f2013-05-04 10:09:18 +02001471
Daniel Vetter36008362013-03-27 00:44:59 +01001472 for (; bpp >= 6*3; bpp -= 2*3) {
Damien Lespiau241bfc32013-09-25 16:45:37 +01001473 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1474 bpp);
Daniel Vetterc4867932012-04-10 10:42:36 +02001475
Dave Airliec6930992014-07-14 11:04:39 +10001476 for (clock = min_clock; clock <= max_clock; clock++) {
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301477 for (lane_count = min_lane_count;
1478 lane_count <= max_lane_count;
1479 lane_count <<= 1) {
1480
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001481 link_clock = common_rates[clock];
Daniel Vetter36008362013-03-27 00:44:59 +01001482 link_avail = intel_dp_max_data_rate(link_clock,
1483 lane_count);
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02001484
Daniel Vetter36008362013-03-27 00:44:59 +01001485 if (mode_rate <= link_avail) {
1486 goto found;
1487 }
1488 }
1489 }
1490 }
1491
1492 return false;
1493
1494found:
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001495 if (intel_dp->color_range_auto) {
1496 /*
1497 * See:
1498 * CEA-861-E - 5.1 Default Encoding Parameters
1499 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1500 */
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001501 pipe_config->limited_color_range =
1502 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1503 } else {
1504 pipe_config->limited_color_range =
1505 intel_dp->limited_color_range;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02001506 }
1507
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001508 pipe_config->lane_count = lane_count;
Sonika Jindala8f3ef62015-03-05 10:02:30 +05301509
Daniel Vetter657445f2013-05-04 10:09:18 +02001510 pipe_config->pipe_bpp = bpp;
Ville Syrjälä94ca7192015-03-13 19:40:31 +02001511 pipe_config->port_clock = common_rates[clock];
Daniel Vetterc4867932012-04-10 10:42:36 +02001512
Ville Syrjälä04a60f92015-07-06 15:10:06 +03001513 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1514 &link_bw, &rate_select);
1515
1516 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1517 link_bw, rate_select, pipe_config->lane_count,
Daniel Vetterff9a6752013-06-01 17:16:21 +02001518 pipe_config->port_clock, bpp);
Daniel Vetter36008362013-03-27 00:44:59 +01001519 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1520 mode_rate, link_avail);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001521
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001522 intel_link_compute_m_n(bpp, lane_count,
Damien Lespiau241bfc32013-09-25 16:45:37 +01001523 adjusted_mode->crtc_clock,
1524 pipe_config->port_clock,
Daniel Vetter03afc4a2013-04-02 23:42:31 +02001525 &pipe_config->dp_m_n);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001526
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301527 if (intel_connector->panel.downclock_mode != NULL &&
Vandana Kannan96178ee2015-01-10 02:25:56 +05301528 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
Vandana Kannanf769cd22014-08-05 07:51:22 -07001529 pipe_config->has_drrs = true;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05301530 intel_link_compute_m_n(bpp, lane_count,
1531 intel_connector->panel.downclock_mode->clock,
1532 pipe_config->port_clock,
1533 &pipe_config->dp_m2_n2);
1534 }
1535
Rodrigo Vivief11bdb2015-10-28 04:16:45 -07001536 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001537 skl_edp_set_pll_config(pipe_config);
Satheeshakrishna M977bb382014-08-22 09:49:12 +05301538 else if (IS_BROXTON(dev))
1539 /* handled in ddi */;
Damien Lespiau5416d872014-11-14 17:24:33 +00001540 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001541 hsw_dp_set_ddi_pll_sel(pipe_config);
Daniel Vetter0e503382014-07-04 11:26:04 -03001542 else
Ville Syrjälä840b32b2015-08-11 20:21:46 +03001543 intel_dp_set_clock(encoder, pipe_config);
Daniel Vetterc6bb3532013-04-19 11:14:33 +02001544
Daniel Vetter36008362013-03-27 00:44:59 +01001545 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001546}
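The nested loops above take the first bpp/clock/lane combination whose payload capacity covers the mode, preferring higher bpp, then a lower link rate, then fewer lanes. A rough standalone sketch of that comparison in plain kbit/s for a 148.5 MHz, 24 bpp mode; the driver's own intel_dp_link_required() and intel_dp_max_data_rate() helpers live earlier in this file and use their own fixed-point units, so the arithmetic below only approximates the idea:

/* Standalone sketch of the bandwidth check driving the loop above. */
#include <stdio.h>

int main(void)
{
	const int link_rates_khz[] = { 162000, 270000, 540000 };
	const long long pixel_clock_khz = 148500;	/* 1920x1080@60 */
	const int bpp = 24;
	/* payload the mode needs, in kbit/s */
	const long long mode_rate = pixel_clock_khz * bpp;
	int clock, lanes;

	for (clock = 0; clock < 3; clock++) {
		for (lanes = 1; lanes <= 4; lanes <<= 1) {
			/* 8b/10b coding: each 10-bit symbol carries 8 data
			 * bits, so per-lane payload = symbol clock * 8 */
			long long link_avail =
				(long long)link_rates_khz[clock] * 8 * lanes;

			if (mode_rate <= link_avail) {
				printf("first fit: %d kHz link, x%d lanes (%lld <= %lld kbit/s)\n",
				       link_rates_khz[clock], lanes,
				       mode_rate, link_avail);
				return 0;
			}
		}
	}
	printf("mode does not fit on any supported link\n");
	return 1;
}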
1547
Daniel Vetter7c62a162013-06-01 17:16:20 +02001548static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
Daniel Vetterea9b6002012-11-29 15:59:31 +01001549{
Daniel Vetter7c62a162013-06-01 17:16:20 +02001550 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1551 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1552 struct drm_device *dev = crtc->base.dev;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001553 struct drm_i915_private *dev_priv = dev->dev_private;
1554 u32 dpa_ctl;
1555
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001556 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1557 crtc->config->port_clock);
Daniel Vetterea9b6002012-11-29 15:59:31 +01001558 dpa_ctl = I915_READ(DP_A);
1559 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1560
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001561 if (crtc->config->port_clock == 162000) {
Ville Syrjäläb377e0d2015-10-29 21:25:59 +02001562 dpa_ctl |= DP_PLL_FREQ_162MHZ;
1563 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001564 } else {
1565 dpa_ctl |= DP_PLL_FREQ_270MHZ;
Daniel Vetter7c62a162013-06-01 17:16:20 +02001566 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
Daniel Vetterea9b6002012-11-29 15:59:31 +01001567 }
Daniel Vetter1ce17032012-11-29 15:59:32 +01001568
Daniel Vetterea9b6002012-11-29 15:59:31 +01001569 I915_WRITE(DP_A, dpa_ctl);
1570
1571 POSTING_READ(DP_A);
1572 udelay(500);
1573}
1574
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001575void intel_dp_set_link_params(struct intel_dp *intel_dp,
1576 const struct intel_crtc_state *pipe_config)
1577{
1578 intel_dp->link_rate = pipe_config->port_clock;
1579 intel_dp->lane_count = pipe_config->lane_count;
1580}
1581
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02001582static void intel_dp_prepare(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001583{
Daniel Vetterb934223d2013-07-21 21:37:05 +02001584 struct drm_device *dev = encoder->base.dev;
Keith Packard417e8222011-11-01 19:54:11 -07001585 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001586 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03001587 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetterb934223d2013-07-21 21:37:05 +02001588 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03001589 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001590
Ville Syrjälä901c2da2015-08-17 18:05:12 +03001591 intel_dp_set_link_params(intel_dp, crtc->config);
1592
Keith Packard417e8222011-11-01 19:54:11 -07001593 /*
Keith Packard1a2eb462011-11-16 16:26:07 -08001594 * There are four kinds of DP registers:
Keith Packard417e8222011-11-01 19:54:11 -07001595 *
1596 * IBX PCH
Keith Packard1a2eb462011-11-16 16:26:07 -08001597 * SNB CPU
1598 * IVB CPU
Keith Packard417e8222011-11-01 19:54:11 -07001599 * CPT PCH
1600 *
1601 * IBX PCH and CPU are the same for almost everything,
1602 * except that the CPU DP PLL is configured in this
1603 * register
1604 *
1605 * CPT PCH is quite different, having many bits moved
1606 * to the TRANS_DP_CTL register instead. That
1607 * configuration happens (oddly) in ironlake_pch_enable
1608 */
Adam Jackson9c9e7922010-04-05 17:57:59 -04001609
Keith Packard417e8222011-11-01 19:54:11 -07001610 /* Preserve the BIOS-computed detected bit. This is
1611 * supposed to be read-only.
1612 */
1613 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001614
Keith Packard417e8222011-11-01 19:54:11 -07001615 /* Handle DP bits in common between all three register formats */
Keith Packard417e8222011-11-01 19:54:11 -07001616 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03001617 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001618
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02001619 if (crtc->config->has_audio)
Chris Wilsonea5b2132010-08-04 13:50:23 +01001620 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
Paulo Zanoni247d89f2012-10-15 15:51:33 -03001621
Keith Packard417e8222011-11-01 19:54:11 -07001622 /* Split out the IBX/CPU vs CPT settings */
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001623
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001624 if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08001625 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1626 intel_dp->DP |= DP_SYNC_HS_HIGH;
1627 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1628 intel_dp->DP |= DP_SYNC_VS_HIGH;
1629 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1630
Jani Nikula6aba5b62013-10-04 15:08:10 +03001631 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard1a2eb462011-11-16 16:26:07 -08001632 intel_dp->DP |= DP_ENHANCED_FRAMING;
1633
Daniel Vetter7c62a162013-06-01 17:16:20 +02001634 intel_dp->DP |= crtc->pipe << 29;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001635 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001636 u32 trans_dp;
1637
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001638 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03001639
1640 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1641 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1642 trans_dp |= TRANS_DP_ENH_FRAMING;
1643 else
1644 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1645 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001646 } else {
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03001647 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1648 crtc->config->limited_color_range)
1649 intel_dp->DP |= DP_COLOR_RANGE_16_235;
Keith Packard417e8222011-11-01 19:54:11 -07001650
1651 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1652 intel_dp->DP |= DP_SYNC_HS_HIGH;
1653 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1654 intel_dp->DP |= DP_SYNC_VS_HIGH;
1655 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1656
Jani Nikula6aba5b62013-10-04 15:08:10 +03001657 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
Keith Packard417e8222011-11-01 19:54:11 -07001658 intel_dp->DP |= DP_ENHANCED_FRAMING;
1659
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001660 if (IS_CHERRYVIEW(dev))
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001661 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03001662 else if (crtc->pipe == PIPE_B)
1663 intel_dp->DP |= DP_PIPEB_SELECT;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08001664 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07001665}
1666
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001667#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1668#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001669
Paulo Zanoni1a5ef5b2013-12-19 14:29:43 -02001670#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1671#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
Keith Packard99ea7122011-11-01 19:57:50 -07001672
Paulo Zanoniffd6749d2013-12-19 14:29:42 -02001673#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1674#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
Keith Packard99ea7122011-11-01 19:57:50 -07001675
Daniel Vetter4be73782014-01-17 14:39:48 +01001676static void wait_panel_status(struct intel_dp *intel_dp,
Keith Packard99ea7122011-11-01 19:57:50 -07001677 u32 mask,
1678 u32 value)
1679{
Paulo Zanoni30add222012-10-26 19:05:45 -02001680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001681 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07001682 u32 pp_stat_reg, pp_ctrl_reg;
1683
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001684 lockdep_assert_held(&dev_priv->pps_mutex);
1685
Jani Nikulabf13e812013-09-06 07:40:05 +03001686 pp_stat_reg = _pp_stat_reg(intel_dp);
1687 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001688
1689 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001690 mask, value,
1691 I915_READ(pp_stat_reg),
1692 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001693
Jesse Barnes453c5422013-03-28 09:55:41 -07001694 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
Keith Packard99ea7122011-11-01 19:57:50 -07001695 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07001696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
Keith Packard99ea7122011-11-01 19:57:50 -07001698 }
Chris Wilson54c136d2013-12-02 09:57:16 +00001699
1700 DRM_DEBUG_KMS("Wait complete\n");
Keith Packard99ea7122011-11-01 19:57:50 -07001701}
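wait_panel_status() and the IDLE_*_MASK/VALUE pairs above implement a simple poll-for-state idiom: the caller describes the target panel state as a mask plus expected value over the PP status bits, and the helper spins until the masked register matches or the timeout hits. A stripped-down standalone sketch of the same pattern (the status bit and timing below are invented for illustration):

/* Standalone sketch of the mask/value polling idiom above. */
#include <stdio.h>

#define SKETCH_PANEL_ON	(1u << 31)	/* invented status bit */

static unsigned int sketch_status;

/* stand-in for a status-register read; flips "on" after a few polls */
static unsigned int sketch_read_status(void)
{
	static int polls;

	if (++polls >= 3)
		sketch_status |= SKETCH_PANEL_ON;
	return sketch_status;
}

static int sketch_wait_status(unsigned int mask, unsigned int value,
			      int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++)
		if ((sketch_read_status() & mask) == value)
			return 0;	/* reached the requested state */
	return -1;			/* "timed out" */
}

int main(void)
{
	if (sketch_wait_status(SKETCH_PANEL_ON, SKETCH_PANEL_ON, 10) == 0)
		printf("panel reported on\n");
	else
		printf("timed out waiting for panel\n");
	return 0;
}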
1702
Daniel Vetter4be73782014-01-17 14:39:48 +01001703static void wait_panel_on(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001704{
1705 DRM_DEBUG_KMS("Wait for panel power on\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001706 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001707}
1708
Daniel Vetter4be73782014-01-17 14:39:48 +01001709static void wait_panel_off(struct intel_dp *intel_dp)
Keith Packardbd943152011-09-18 23:09:52 -07001710{
Keith Packardbd943152011-09-18 23:09:52 -07001711 DRM_DEBUG_KMS("Wait for panel power off time\n");
Daniel Vetter4be73782014-01-17 14:39:48 +01001712 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
Keith Packardbd943152011-09-18 23:09:52 -07001713}
Keith Packardbd943152011-09-18 23:09:52 -07001714
Daniel Vetter4be73782014-01-17 14:39:48 +01001715static void wait_panel_power_cycle(struct intel_dp *intel_dp)
Keith Packard99ea7122011-11-01 19:57:50 -07001716{
1717 DRM_DEBUG_KMS("Wait for panel power cycle\n");
Paulo Zanonidce56b32013-12-19 14:29:40 -02001718
1719 /* When we disable the VDD override bit last we have to do the manual
1720 * wait. */
1721 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1722 intel_dp->panel_power_cycle_delay);
1723
Daniel Vetter4be73782014-01-17 14:39:48 +01001724 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
Keith Packard99ea7122011-11-01 19:57:50 -07001725}
Keith Packardbd943152011-09-18 23:09:52 -07001726
Daniel Vetter4be73782014-01-17 14:39:48 +01001727static void wait_backlight_on(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001728{
1729 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1730 intel_dp->backlight_on_delay);
1731}
1732
Daniel Vetter4be73782014-01-17 14:39:48 +01001733static void edp_wait_backlight_off(struct intel_dp *intel_dp)
Paulo Zanonidce56b32013-12-19 14:29:40 -02001734{
1735 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1736 intel_dp->backlight_off_delay);
1737}
Keith Packard99ea7122011-11-01 19:57:50 -07001738
Keith Packard832dd3c2011-11-01 19:34:06 -07001739/* Read the current pp_control value, unlocking the register if it
1740 * is locked
1741 */
1742
Jesse Barnes453c5422013-03-28 09:55:41 -07001743static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
Keith Packard832dd3c2011-11-01 19:34:06 -07001744{
Jesse Barnes453c5422013-03-28 09:55:41 -07001745 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1746 struct drm_i915_private *dev_priv = dev->dev_private;
1747 u32 control;
Jesse Barnes453c5422013-03-28 09:55:41 -07001748
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001749 lockdep_assert_held(&dev_priv->pps_mutex);
1750
Jani Nikulabf13e812013-09-06 07:40:05 +03001751 control = I915_READ(_pp_ctrl_reg(intel_dp));
Vandana Kannanb0a08be2015-06-18 11:00:55 +05301752 if (!IS_BROXTON(dev)) {
1753 control &= ~PANEL_UNLOCK_MASK;
1754 control |= PANEL_UNLOCK_REGS;
1755 }
Keith Packard832dd3c2011-11-01 19:34:06 -07001756 return control;
Keith Packardbd943152011-09-18 23:09:52 -07001757}
1758
Ville Syrjälä951468f2014-09-04 14:55:31 +03001759/*
1760 * Must be paired with edp_panel_vdd_off().
1761 * Must hold pps_mutex around the whole on/off sequence.
1762 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763 */
Ville Syrjälä1e0560e2014-08-19 13:24:25 +03001764static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001765{
Paulo Zanoni30add222012-10-26 19:05:45 -02001766 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001767 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1768 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Jesse Barnes5d613502011-01-24 17:10:54 -08001769 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001770 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001771 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001772 u32 pp_stat_reg, pp_ctrl_reg;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001773 bool need_to_disable = !intel_dp->want_panel_vdd;
Jesse Barnes5d613502011-01-24 17:10:54 -08001774
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
Keith Packard97af61f572011-09-28 16:23:51 -07001777 if (!is_edp(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001778 return false;
Keith Packardbd943152011-09-18 23:09:52 -07001779
Egbert Eich2c623c12014-11-25 12:54:57 +01001780 cancel_delayed_work(&intel_dp->panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001781 intel_dp->want_panel_vdd = true;
Keith Packard99ea7122011-11-01 19:57:50 -07001782
Daniel Vetter4be73782014-01-17 14:39:48 +01001783 if (edp_have_panel_vdd(intel_dp))
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001784 return need_to_disable;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001785
Imre Deak4e6e1a52014-03-27 17:45:11 +02001786 power_domain = intel_display_port_power_domain(intel_encoder);
1787 intel_display_power_get(dev_priv, power_domain);
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001788
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001789 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1790 port_name(intel_dig_port->port));
Keith Packardbd943152011-09-18 23:09:52 -07001791
Daniel Vetter4be73782014-01-17 14:39:48 +01001792 if (!edp_have_panel_power(intel_dp))
1793 wait_panel_power_cycle(intel_dp);
Keith Packard99ea7122011-11-01 19:57:50 -07001794
Jesse Barnes453c5422013-03-28 09:55:41 -07001795 pp = ironlake_get_pp_control(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001796 pp |= EDP_FORCE_VDD;
Keith Packardebf33b12011-09-29 15:53:27 -07001797
Jani Nikulabf13e812013-09-06 07:40:05 +03001798 pp_stat_reg = _pp_stat_reg(intel_dp);
1799 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001800
1801 I915_WRITE(pp_ctrl_reg, pp);
1802 POSTING_READ(pp_ctrl_reg);
1803 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1804 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Keith Packardebf33b12011-09-29 15:53:27 -07001805 /*
1806 * If the panel wasn't on, delay before accessing aux channel
1807 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001808 if (!edp_have_panel_power(intel_dp)) {
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001809 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1810 port_name(intel_dig_port->port));
Keith Packardf01eca22011-09-28 16:48:10 -07001811 msleep(intel_dp->panel_power_up_delay);
Keith Packardf01eca22011-09-28 16:48:10 -07001812 }
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001813
1814 return need_to_disable;
1815}
1816
Ville Syrjälä951468f2014-09-04 14:55:31 +03001817/*
1818 * Must be paired with intel_edp_panel_vdd_off() or
1819 * intel_edp_panel_off().
1820 * Nested calls to these functions are not allowed since
1821 * we drop the lock. Caller must use some higher level
1822 * locking to prevent nested calls from other threads.
1823 */
Daniel Vetterb80d6c72014-03-19 15:54:37 +01001824void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001825{
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001826 bool vdd;
Jani Nikulaadddaaf2014-03-14 16:51:13 +02001827
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001828 if (!is_edp(intel_dp))
1829 return;
1830
Ville Syrjälä773538e82014-09-04 14:54:56 +03001831 pps_lock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001832 vdd = edp_panel_vdd_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001833 pps_unlock(intel_dp);
Ville Syrjäläc695b6b2014-08-18 22:16:03 +03001834
Rob Clarke2c719b2014-12-15 13:56:32 -05001835 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001836 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes5d613502011-01-24 17:10:54 -08001837}
1838
Daniel Vetter4be73782014-01-17 14:39:48 +01001839static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
Jesse Barnes5d613502011-01-24 17:10:54 -08001840{
Paulo Zanoni30add222012-10-26 19:05:45 -02001841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001842 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001843 struct intel_digital_port *intel_dig_port =
1844 dp_to_dig_port(intel_dp);
1845 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1846 enum intel_display_power_domain power_domain;
Jesse Barnes5d613502011-01-24 17:10:54 -08001847 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001848 u32 pp_stat_reg, pp_ctrl_reg;
Jesse Barnes5d613502011-01-24 17:10:54 -08001849
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001850 lockdep_assert_held(&dev_priv->pps_mutex);
Daniel Vettera0e99e62012-12-02 01:05:46 +01001851
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001852 WARN_ON(intel_dp->want_panel_vdd);
Imre Deak4e6e1a52014-03-27 17:45:11 +02001853
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001854 if (!edp_have_panel_vdd(intel_dp))
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001855 return;
Paulo Zanonib0665d52013-10-30 19:50:27 -02001856
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001857 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1858 port_name(intel_dig_port->port));
Jesse Barnes453c5422013-03-28 09:55:41 -07001859
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001860 pp = ironlake_get_pp_control(intel_dp);
1861 pp &= ~EDP_FORCE_VDD;
Jesse Barnes453c5422013-03-28 09:55:41 -07001862
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1864 pp_stat_reg = _pp_stat_reg(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001865
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001866 I915_WRITE(pp_ctrl_reg, pp);
1867 POSTING_READ(pp_ctrl_reg);
Paulo Zanoni90791a52013-12-06 17:32:42 -02001868
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001869 /* Make sure sequencer is idle before allowing subsequent activity */
1870 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1871 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001872
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001873 if ((pp & POWER_TARGET_ON) == 0)
1874 intel_dp->last_power_cycle = jiffies;
Paulo Zanonie9cb81a2013-11-21 13:47:23 -02001875
Ville Syrjäläbe2c9192014-08-18 22:16:01 +03001876 power_domain = intel_display_port_power_domain(intel_encoder);
1877 intel_display_power_put(dev_priv, power_domain);
Keith Packardbd943152011-09-18 23:09:52 -07001878}
1879
Daniel Vetter4be73782014-01-17 14:39:48 +01001880static void edp_panel_vdd_work(struct work_struct *__work)
Keith Packardbd943152011-09-18 23:09:52 -07001881{
1882 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1883 struct intel_dp, panel_vdd_work);
Keith Packardbd943152011-09-18 23:09:52 -07001884
Ville Syrjälä773538e82014-09-04 14:54:56 +03001885 pps_lock(intel_dp);
Ville Syrjälä15e899a2014-08-18 22:16:02 +03001886 if (!intel_dp->want_panel_vdd)
1887 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001888 pps_unlock(intel_dp);
Keith Packardbd943152011-09-18 23:09:52 -07001889}
1890
Imre Deakaba86892014-07-30 15:57:31 +03001891static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1892{
1893 unsigned long delay;
1894
1895 /*
1896 * Queue the timer to fire a long time from now (relative to the power
1897 * down delay) to keep the panel power up across a sequence of
1898 * operations.
1899 */
1900 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1901 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1902}
1903
Ville Syrjälä951468f2014-09-04 14:55:31 +03001904/*
1905 * Must be paired with edp_panel_vdd_on().
1906 * Must hold pps_mutex around the whole on/off sequence.
1907 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908 */
Daniel Vetter4be73782014-01-17 14:39:48 +01001909static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
Keith Packardbd943152011-09-18 23:09:52 -07001910{
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001911 struct drm_i915_private *dev_priv =
1912 intel_dp_to_dev(intel_dp)->dev_private;
1913
1914 lockdep_assert_held(&dev_priv->pps_mutex);
1915
Keith Packard97af61f572011-09-28 16:23:51 -07001916 if (!is_edp(intel_dp))
1917 return;
Jesse Barnes5d613502011-01-24 17:10:54 -08001918
Rob Clarke2c719b2014-12-15 13:56:32 -05001919 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001920 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packardf2e8b182011-11-01 20:01:35 -07001921
Keith Packardbd943152011-09-18 23:09:52 -07001922 intel_dp->want_panel_vdd = false;
1923
Imre Deakaba86892014-07-30 15:57:31 +03001924 if (sync)
Daniel Vetter4be73782014-01-17 14:39:48 +01001925 edp_panel_vdd_off_sync(intel_dp);
Imre Deakaba86892014-07-30 15:57:31 +03001926 else
1927 edp_panel_vdd_schedule_off(intel_dp);
Jesse Barnes5d613502011-01-24 17:10:54 -08001928}
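To make the pairing rules in the comments above concrete, a hedged sketch of the expected calling pattern (illustrative only, not a caller copied from this file):

/*
 * Callers that already hold pps_mutex bracket their panel/AUX work with the
 * low-level pair:
 *
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... access the AUX channel or panel registers ...
 *	edp_panel_vdd_off(intel_dp, false);
 *
 * where sync == false defers the actual VDD drop to the delayed work above
 * and sync == true turns it off immediately. Code running outside a
 * PPS-locked section uses the intel_edp_panel_vdd_on() wrapper instead,
 * which takes and releases pps_lock() itself and is paired with
 * intel_edp_panel_vdd_off() or intel_edp_panel_off(), as noted above.
 */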
1929
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001930static void edp_panel_on(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001931{
Paulo Zanoni30add222012-10-26 19:05:45 -02001932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001933 struct drm_i915_private *dev_priv = dev->dev_private;
Keith Packard99ea7122011-11-01 19:57:50 -07001934 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001935 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001936
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001937 lockdep_assert_held(&dev_priv->pps_mutex);
1938
Keith Packard97af61f572011-09-28 16:23:51 -07001939 if (!is_edp(intel_dp))
Keith Packardbd943152011-09-18 23:09:52 -07001940 return;
Keith Packard99ea7122011-11-01 19:57:50 -07001941
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03001942 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1943 port_name(dp_to_dig_port(intel_dp)->port));
Keith Packard99ea7122011-11-01 19:57:50 -07001944
Ville Syrjäläe7a89ac2014-10-16 21:30:07 +03001945 if (WARN(edp_have_panel_power(intel_dp),
1946 "eDP port %c panel power already on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port)))
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001948 return;
Jesse Barnes9934c132010-07-22 13:18:19 -07001949
Daniel Vetter4be73782014-01-17 14:39:48 +01001950 wait_panel_power_cycle(intel_dp);
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001951
Jani Nikulabf13e812013-09-06 07:40:05 +03001952 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07001953 pp = ironlake_get_pp_control(intel_dp);
Keith Packard05ce1a42011-09-29 16:33:01 -07001954 if (IS_GEN5(dev)) {
1955 /* ILK workaround: disable reset around power sequence */
1956 pp &= ~PANEL_POWER_RESET;
Jani Nikulabf13e812013-09-06 07:40:05 +03001957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001959 }
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07001960
Keith Packard1c0ae802011-09-19 13:59:29 -07001961 pp |= POWER_TARGET_ON;
Keith Packard99ea7122011-11-01 19:57:50 -07001962 if (!IS_GEN5(dev))
1963 pp |= PANEL_POWER_RESET;
1964
Jesse Barnes453c5422013-03-28 09:55:41 -07001965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07001967
Daniel Vetter4be73782014-01-17 14:39:48 +01001968 wait_panel_on(intel_dp);
Paulo Zanonidce56b32013-12-19 14:29:40 -02001969 intel_dp->last_power_on = jiffies;
Jesse Barnes9934c132010-07-22 13:18:19 -07001970
Keith Packard05ce1a42011-09-29 16:33:01 -07001971 if (IS_GEN5(dev)) {
1972 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
Jani Nikulabf13e812013-09-06 07:40:05 +03001973 I915_WRITE(pp_ctrl_reg, pp);
1974 POSTING_READ(pp_ctrl_reg);
Keith Packard05ce1a42011-09-29 16:33:01 -07001975 }
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001976}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03001977
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001978void intel_edp_panel_on(struct intel_dp *intel_dp)
1979{
1980 if (!is_edp(intel_dp))
1981 return;
1982
1983 pps_lock(intel_dp);
1984 edp_panel_on(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03001985 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001986}
1987
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001988
1989static void edp_panel_off(struct intel_dp *intel_dp)
Jesse Barnes9934c132010-07-22 13:18:19 -07001990{
Imre Deak4e6e1a52014-03-27 17:45:11 +02001991 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1992 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanoni30add222012-10-26 19:05:45 -02001993 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07001994 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak4e6e1a52014-03-27 17:45:11 +02001995 enum intel_display_power_domain power_domain;
Keith Packard99ea7122011-11-01 19:57:50 -07001996 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07001997 u32 pp_ctrl_reg;
Jesse Barnes9934c132010-07-22 13:18:19 -07001998
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03001999 lockdep_assert_held(&dev_priv->pps_mutex);
2000
Keith Packard97af61f572011-09-28 16:23:51 -07002001 if (!is_edp(intel_dp))
2002 return;
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002003
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002004 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2005 port_name(dp_to_dig_port(intel_dp)->port));
Jesse Barnes37c6c9b2010-08-11 10:04:43 -07002006
Ville Syrjälä3936fcf2014-10-16 21:30:02 +03002007 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2008 port_name(dp_to_dig_port(intel_dp)->port));
Jani Nikula24f3e092014-03-17 16:43:36 +02002009
Jesse Barnes453c5422013-03-28 09:55:41 -07002010 pp = ironlake_get_pp_control(intel_dp);
Daniel Vetter35a38552012-08-12 22:17:14 +02002011 /* We need to switch off panel power _and_ force vdd, for otherwise some
2012 * panels get very unhappy and cease to work. */
Patrik Jakobssonb3064152014-03-04 00:42:44 +01002013 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2014 EDP_BLC_ENABLE);
Jesse Barnes453c5422013-03-28 09:55:41 -07002015
Jani Nikulabf13e812013-09-06 07:40:05 +03002016 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002017
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002018 intel_dp->want_panel_vdd = false;
2019
Jesse Barnes453c5422013-03-28 09:55:41 -07002020 I915_WRITE(pp_ctrl_reg, pp);
2021 POSTING_READ(pp_ctrl_reg);
Jesse Barnes9934c132010-07-22 13:18:19 -07002022
Paulo Zanonidce56b32013-12-19 14:29:40 -02002023 intel_dp->last_power_cycle = jiffies;
Daniel Vetter4be73782014-01-17 14:39:48 +01002024 wait_panel_off(intel_dp);
Paulo Zanoni849e39f2014-03-07 20:05:20 -03002025
2026 /* We got a reference when we enabled the VDD. */
Imre Deak4e6e1a52014-03-27 17:45:11 +02002027 power_domain = intel_display_port_power_domain(intel_encoder);
2028 intel_display_power_put(dev_priv, power_domain);
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002029}
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002030
Ville Syrjälä9f0fb5b2014-10-16 21:27:32 +03002031void intel_edp_panel_off(struct intel_dp *intel_dp)
2032{
2033 if (!is_edp(intel_dp))
2034 return;
2035
2036 pps_lock(intel_dp);
2037 edp_panel_off(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002038 pps_unlock(intel_dp);
Jesse Barnes9934c132010-07-22 13:18:19 -07002039}
2040
Jani Nikula1250d102014-08-12 17:11:39 +03002041/* Enable backlight in the panel power control. */
2042static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002043{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002044 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 struct drm_device *dev = intel_dig_port->base.base.dev;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002046 struct drm_i915_private *dev_priv = dev->dev_private;
2047 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002048 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002049
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07002050 /*
2051 * If we enable the backlight right away following a panel power
2052 * on, we may see slight flicker as the panel syncs with the eDP
2053 * link. So delay a bit to make sure the image is solid before
2054 * allowing it to appear.
2055 */
Daniel Vetter4be73782014-01-17 14:39:48 +01002056 wait_backlight_on(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002057
Ville Syrjälä773538e82014-09-04 14:54:56 +03002058 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002059
Jesse Barnes453c5422013-03-28 09:55:41 -07002060 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002061 pp |= EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002062
Jani Nikulabf13e812013-09-06 07:40:05 +03002063 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002064
2065 I915_WRITE(pp_ctrl_reg, pp);
2066 POSTING_READ(pp_ctrl_reg);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002067
Ville Syrjälä773538e82014-09-04 14:54:56 +03002068 pps_unlock(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002069}
2070
Jani Nikula1250d102014-08-12 17:11:39 +03002071/* Enable backlight PWM and backlight PP control. */
2072void intel_edp_backlight_on(struct intel_dp *intel_dp)
2073{
2074 if (!is_edp(intel_dp))
2075 return;
2076
2077 DRM_DEBUG_KMS("\n");
2078
2079 intel_panel_enable_backlight(intel_dp->attached_connector);
2080 _intel_edp_backlight_on(intel_dp);
2081}
2082
2083/* Disable backlight in the panel power control. */
2084static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002085{
Paulo Zanoni30add222012-10-26 19:05:45 -02002086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002087 struct drm_i915_private *dev_priv = dev->dev_private;
2088 u32 pp;
Jesse Barnes453c5422013-03-28 09:55:41 -07002089 u32 pp_ctrl_reg;
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002090
Keith Packardf01eca22011-09-28 16:48:10 -07002091 if (!is_edp(intel_dp))
2092 return;
2093
Ville Syrjälä773538e82014-09-04 14:54:56 +03002094 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002095
Jesse Barnes453c5422013-03-28 09:55:41 -07002096 pp = ironlake_get_pp_control(intel_dp);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002097 pp &= ~EDP_BLC_ENABLE;
Jesse Barnes453c5422013-03-28 09:55:41 -07002098
Jani Nikulabf13e812013-09-06 07:40:05 +03002099 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
Jesse Barnes453c5422013-03-28 09:55:41 -07002100
2101 I915_WRITE(pp_ctrl_reg, pp);
2102 POSTING_READ(pp_ctrl_reg);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002103
Ville Syrjälä773538e82014-09-04 14:54:56 +03002104 pps_unlock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002105
Paulo Zanonidce56b32013-12-19 14:29:40 -02002106 intel_dp->last_backlight_off = jiffies;
Jesse Barnesf7d23232014-03-31 11:13:56 -07002107 edp_wait_backlight_off(intel_dp);
Jani Nikula1250d102014-08-12 17:11:39 +03002108}
Jesse Barnesf7d23232014-03-31 11:13:56 -07002109
Jani Nikula1250d102014-08-12 17:11:39 +03002110/* Disable backlight PP control and backlight PWM. */
2111void intel_edp_backlight_off(struct intel_dp *intel_dp)
2112{
2113 if (!is_edp(intel_dp))
2114 return;
2115
2116 DRM_DEBUG_KMS("\n");
2117
2118 _intel_edp_backlight_off(intel_dp);
Jesse Barnesf7d23232014-03-31 11:13:56 -07002119 intel_panel_disable_backlight(intel_dp->attached_connector);
Zhenyu Wang32f9d652009-07-24 01:00:32 +08002120}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002121
Jani Nikula73580fb72014-08-12 17:11:41 +03002122/*
2123 * Hook for controlling the panel power control backlight through the bl_power
2124 * sysfs attribute. Take care to handle multiple calls.
2125 */
2126static void intel_edp_backlight_power(struct intel_connector *connector,
2127 bool enable)
2128{
2129 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002130 bool is_enabled;
2131
Ville Syrjälä773538e82014-09-04 14:54:56 +03002132 pps_lock(intel_dp);
Ville Syrjäläe39b9992014-09-04 14:53:14 +03002133 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002134 pps_unlock(intel_dp);
Jani Nikula73580fb72014-08-12 17:11:41 +03002135
2136 if (is_enabled == enable)
2137 return;
2138
Jani Nikula23ba9372014-08-27 14:08:43 +03002139 DRM_DEBUG_KMS("panel power control backlight %s\n",
2140 enable ? "enable" : "disable");
Jani Nikula73580fb72014-08-12 17:11:41 +03002141
2142 if (enable)
2143 _intel_edp_backlight_on(intel_dp);
2144 else
2145 _intel_edp_backlight_off(intel_dp);
2146}
2147
Ville Syrjälä64e10772015-10-29 21:26:01 +02002148static const char *state_string(bool enabled)
2149{
2150 return enabled ? "on" : "off";
2151}
2152
2153static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2154{
2155 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2156 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2157 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2158
2159 I915_STATE_WARN(cur_state != state,
2160 "DP port %c state assertion failure (expected %s, current %s)\n",
2161 port_name(dig_port->port),
2162 state_string(state), state_string(cur_state));
2163}
2164#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2165
2166static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2167{
2168 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2169
2170 I915_STATE_WARN(cur_state != state,
2171 "eDP PLL state assertion failure (expected %s, current %s)\n",
2172 state_string(state), state_string(cur_state));
2173}
2174#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2175#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2176
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002177static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002178{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002180 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2181 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002182 u32 dpa_ctl;
2183
Ville Syrjälä64e10772015-10-29 21:26:01 +02002184 assert_pipe_disabled(dev_priv, crtc->pipe);
2185 assert_dp_port_disabled(intel_dp);
2186 assert_edp_pll_disabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002187
Jesse Barnesd240f202010-08-13 15:43:26 -07002188 DRM_DEBUG_KMS("\n");
2189 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002190
2191 /* We don't adjust intel_dp->DP while tearing down the link, to
2192 * facilitate link retraining (e.g. after hotplug). Hence clear all
2193 * enable bits here to ensure that we don't enable too much. */
2194 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2195 intel_dp->DP |= DP_PLL_ENABLE;
2196 I915_WRITE(DP_A, intel_dp->DP);
Jesse Barnes298b0b32010-10-07 16:01:24 -07002197 POSTING_READ(DP_A);
2198 udelay(200);
Jesse Barnesd240f202010-08-13 15:43:26 -07002199}
2200
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002201static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
Jesse Barnesd240f202010-08-13 15:43:26 -07002202{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02002203 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä64e10772015-10-29 21:26:01 +02002204 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2205 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesd240f202010-08-13 15:43:26 -07002206 u32 dpa_ctl;
2207
Ville Syrjälä64e10772015-10-29 21:26:01 +02002208 assert_pipe_disabled(dev_priv, crtc->pipe);
2209 assert_dp_port_disabled(intel_dp);
2210 assert_edp_pll_enabled(dev_priv);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002211
Jesse Barnesd240f202010-08-13 15:43:26 -07002212 dpa_ctl = I915_READ(DP_A);
Daniel Vetter07679352012-09-06 22:15:42 +02002213
2214 /* We can't rely on the value tracked for the DP register in
2215 * intel_dp->DP because link_down must not change that (otherwise link
 2216	 * re-training will fail). */
Jesse Barnes298b0b32010-10-07 16:01:24 -07002217 dpa_ctl &= ~DP_PLL_ENABLE;
Jesse Barnesd240f202010-08-13 15:43:26 -07002218 I915_WRITE(DP_A, dpa_ctl);
Chris Wilson1af5fa12010-09-08 21:07:28 +01002219 POSTING_READ(DP_A);
Jesse Barnesd240f202010-08-13 15:43:26 -07002220 udelay(200);
2221}
2222
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002223/* If the sink supports it, try to set the power state appropriately */
Paulo Zanonic19b0662012-10-15 15:51:41 -03002224void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002225{
2226 int ret, i;
2227
2228 /* Should have a valid DPCD by this point */
2229 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2230 return;
2231
2232 if (mode != DRM_MODE_DPMS_ON) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002233 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2234 DP_SET_POWER_D3);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002235 } else {
2236 /*
2237 * When turning on, we need to retry for 1ms to give the sink
2238 * time to wake up.
2239 */
2240 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02002241 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2242 DP_SET_POWER_D0);
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002243 if (ret == 1)
2244 break;
2245 msleep(1);
2246 }
2247 }
Jani Nikulaf9cac722014-09-02 16:33:52 +03002248
2249 if (ret != 1)
2250 DRM_DEBUG_KMS("failed to %s sink power state\n",
2251 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
Jesse Barnesc7ad3812011-07-07 11:11:03 -07002252}
2253
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002254static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2255 enum pipe *pipe)
Jesse Barnesd240f202010-08-13 15:43:26 -07002256{
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002257 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deakbc7d38a2013-05-16 14:40:36 +03002258 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002259 struct drm_device *dev = encoder->base.dev;
2260 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak6d129be2014-03-05 16:20:54 +02002261 enum intel_display_power_domain power_domain;
2262 u32 tmp;
2263
2264 power_domain = intel_display_port_power_domain(encoder);
Daniel Vetterf458ebb2014-09-30 10:56:39 +02002265 if (!intel_display_power_is_enabled(dev_priv, power_domain))
Imre Deak6d129be2014-03-05 16:20:54 +02002266 return false;
2267
2268 tmp = I915_READ(intel_dp->output_reg);
Jesse Barnesd240f202010-08-13 15:43:26 -07002269
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002270 if (!(tmp & DP_PORT_EN))
2271 return false;
2272
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002273 if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002274 *pipe = PORT_TO_PIPE_CPT(tmp);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002275 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002276 enum pipe p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002277
Ville Syrjäläadc289d2015-05-05 17:17:30 +03002278 for_each_pipe(dev_priv, p) {
2279 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2280 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2281 *pipe = p;
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002282 return true;
2283 }
2284 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002285
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002286 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2287 intel_dp->output_reg);
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002288 } else if (IS_CHERRYVIEW(dev)) {
2289 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2290 } else {
2291 *pipe = PORT_TO_PIPE(tmp);
Daniel Vetter4a0833e2012-10-26 10:58:11 +02002292 }
Daniel Vetter19d8fe12012-07-02 13:26:27 +02002293
2294 return true;
2295}
2296
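/*
 * Fill pipe_config from the current hardware state: sync polarities, audio
 * enable, limited color range, lane count, link M/N and dotclock, and for
 * port A the port clock derived from the eDP PLL frequency select. Also
 * fixes up a too-low VBT eDP bpp limit based on what the BIOS actually
 * programmed (see the comment at the end of the function).
 */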
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002297static void intel_dp_get_config(struct intel_encoder *encoder,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02002298 struct intel_crtc_state *pipe_config)
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002299{
2300 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002301 u32 tmp, flags = 0;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002302 struct drm_device *dev = encoder->base.dev;
2303 struct drm_i915_private *dev_priv = dev->dev_private;
2304 enum port port = dp_to_dig_port(intel_dp)->port;
2305 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjälä18442d02013-09-13 16:00:08 +03002306 int dotclock;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002307
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002308 tmp = I915_READ(intel_dp->output_reg);
Jani Nikula9fcb1702015-05-05 16:32:12 +03002309
2310 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
Daniel Vetter9ed109a2014-04-24 23:54:52 +02002311
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002312 if (HAS_PCH_CPT(dev) && port != PORT_A) {
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002313 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2314
2315 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002316 flags |= DRM_MODE_FLAG_PHSYNC;
2317 else
2318 flags |= DRM_MODE_FLAG_NHSYNC;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002319
Ville Syrjäläb81e34c2015-07-06 15:10:03 +03002320 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
Xiong Zhang63000ef2013-06-28 12:59:06 +08002321 flags |= DRM_MODE_FLAG_PVSYNC;
2322 else
2323 flags |= DRM_MODE_FLAG_NVSYNC;
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002324 } else {
2325 if (tmp & DP_SYNC_HS_HIGH)
2326 flags |= DRM_MODE_FLAG_PHSYNC;
2327 else
2328 flags |= DRM_MODE_FLAG_NHSYNC;
2329
2330 if (tmp & DP_SYNC_VS_HIGH)
2331 flags |= DRM_MODE_FLAG_PVSYNC;
2332 else
2333 flags |= DRM_MODE_FLAG_NVSYNC;
Xiong Zhang63000ef2013-06-28 12:59:06 +08002334 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002335
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002336 pipe_config->base.adjusted_mode.flags |= flags;
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002337
Ville Syrjälä8c875fc2014-09-12 15:46:29 +03002338 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2339 tmp & DP_COLOR_RANGE_16_235)
2340 pipe_config->limited_color_range = true;
2341
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002342 pipe_config->has_dp_encoder = true;
2343
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03002344 pipe_config->lane_count =
2345 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2346
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03002347 intel_dp_get_m_n(crtc, pipe_config);
2348
Ville Syrjälä18442d02013-09-13 16:00:08 +03002349 if (port == PORT_A) {
Ville Syrjäläb377e0d2015-10-29 21:25:59 +02002350 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
Jesse Barnesf1f644d2013-06-27 00:39:25 +03002351 pipe_config->port_clock = 162000;
2352 else
2353 pipe_config->port_clock = 270000;
2354 }
Ville Syrjälä18442d02013-09-13 16:00:08 +03002355
2356 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2357 &pipe_config->dp_m_n);
2358
2359 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2360 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2361
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02002362 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
Daniel Vetter7f16e5c2013-11-04 16:28:47 +01002363
Jani Nikulac6cd2ee2013-10-21 10:52:07 +03002364 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2365 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2366 /*
2367 * This is a big fat ugly hack.
2368 *
2369 * Some machines in UEFI boot mode provide us a VBT that has 18
2370 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2371 * unknown we fail to light up. Yet the same BIOS boots up with
2372 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2373 * max, not what it tells us to use.
2374 *
2375 * Note: This will still be broken if the eDP panel is not lit
2376 * up by the BIOS, and thus we can't get the mode at module
2377 * load.
2378 */
2379 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2380 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2381 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2382 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -07002383}
2384
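/*
 * Common non-DDI DP disable path: shut down audio and PSR where enabled,
 * turn the backlight and panel off while holding VDD so the sink can still
 * be reached, put the sink into D3, and on g4x take the link down before
 * the pipe is disabled.
 */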
Daniel Vettere8cb4552012-07-01 13:05:48 +02002385static void intel_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002386{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002388 struct drm_device *dev = encoder->base.dev;
Jani Nikula495a5bb2014-10-27 16:26:55 +02002389 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2390
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002391 if (crtc->config->has_audio)
Jani Nikula495a5bb2014-10-27 16:26:55 +02002392 intel_audio_codec_disable(encoder);
Daniel Vetter6cb49832012-05-20 17:14:50 +02002393
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002394 if (HAS_PSR(dev) && !HAS_DDI(dev))
2395 intel_psr_disable(intel_dp);
2396
Daniel Vetter6cb49832012-05-20 17:14:50 +02002397 /* Make sure the panel is off before trying to change the mode. But also
2398 * ensure that we have vdd while we switch off the panel. */
Jani Nikula24f3e092014-03-17 16:43:36 +02002399 intel_edp_panel_vdd_on(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01002400 intel_edp_backlight_off(intel_dp);
Jani Nikulafdbc3b12013-11-12 17:10:13 +02002401 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
Daniel Vetter4be73782014-01-17 14:39:48 +01002402 intel_edp_panel_off(intel_dp);
Daniel Vetter37398502012-09-06 22:15:44 +02002403
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002404 /* disable the port before the pipe on g4x */
2405 if (INTEL_INFO(dev)->gen < 5)
Daniel Vetter37398502012-09-06 22:15:44 +02002406 intel_dp_link_down(intel_dp);
Jesse Barnesd240f202010-08-13 15:43:26 -07002407}
2408
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002409static void ilk_post_disable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002410{
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002411 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Imre Deak982a3862013-05-23 19:39:40 +03002412 enum port port = dp_to_dig_port(intel_dp)->port;
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002413
Ville Syrjälä49277c32014-03-31 18:21:26 +03002414 intel_dp_link_down(intel_dp);
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03002415 if (port == PORT_A)
2416 ironlake_edp_pll_off(intel_dp);
Ville Syrjälä49277c32014-03-31 18:21:26 +03002417}
2418
2419static void vlv_post_disable_dp(struct intel_encoder *encoder)
2420{
2421 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2422
2423 intel_dp_link_down(intel_dp);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002424}
2425
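/*
 * Assert or deassert the CHV data lane soft reset, including the PCS clock
 * soft reset with the request override enabled. The second PCS group is
 * only touched when more than two lanes are in use. Called with sb_lock
 * held.
 */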
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002426static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2427 bool reset)
2428{
2429 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2430 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2431 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2432 enum pipe pipe = crtc->pipe;
2433 uint32_t val;
2434
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2436 if (reset)
2437 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2438 else
2439 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2441
2442 if (crtc->config->lane_count > 2) {
2443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2444 if (reset)
2445 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2446 else
2447 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2448 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2449 }
2450
2451 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2452 val |= CHV_PCS_REQ_SOFTRESET_EN;
2453 if (reset)
2454 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2455 else
2456 val |= DPIO_PCS_CLK_SOFT_RESET;
2457 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2458
2459 if (crtc->config->lane_count > 2) {
2460 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2461 val |= CHV_PCS_REQ_SOFTRESET_EN;
2462 if (reset)
2463 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2464 else
2465 val |= DPIO_PCS_CLK_SOFT_RESET;
2466 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2467 }
2468}
2469
Ville Syrjälä580d3812014-04-09 13:29:00 +03002470static void chv_post_disable_dp(struct intel_encoder *encoder)
2471{
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä580d3812014-04-09 13:29:00 +03002475
2476 intel_dp_link_down(intel_dp);
2477
Ville Syrjäläa5805162015-05-26 20:42:30 +03002478 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002479
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002480 /* Assert data lane reset */
2481 chv_data_lane_soft_reset(encoder, true);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002482
Ville Syrjäläa5805162015-05-26 20:42:30 +03002483 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä580d3812014-04-09 13:29:00 +03002484}
2485
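/*
 * Encode the requested link training pattern (and scrambling disable) into
 * the DP port register value. On DDI platforms the pattern is written to
 * DP_TP_CTL directly; on the others only *DP is updated and the caller is
 * responsible for writing it out to the hardware.
 */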
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002486static void
2487_intel_dp_set_link_train(struct intel_dp *intel_dp,
2488 uint32_t *DP,
2489 uint8_t dp_train_pat)
2490{
2491 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2492 struct drm_device *dev = intel_dig_port->base.base.dev;
2493 struct drm_i915_private *dev_priv = dev->dev_private;
2494 enum port port = intel_dig_port->port;
2495
2496 if (HAS_DDI(dev)) {
2497 uint32_t temp = I915_READ(DP_TP_CTL(port));
2498
2499 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2500 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2501 else
2502 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2503
2504 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2505 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2506 case DP_TRAINING_PATTERN_DISABLE:
2507 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2508
2509 break;
2510 case DP_TRAINING_PATTERN_1:
2511 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2512 break;
2513 case DP_TRAINING_PATTERN_2:
2514 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2515 break;
2516 case DP_TRAINING_PATTERN_3:
2517 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2518 break;
2519 }
2520 I915_WRITE(DP_TP_CTL(port), temp);
2521
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03002522 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2523 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002524 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2525
2526 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2527 case DP_TRAINING_PATTERN_DISABLE:
2528 *DP |= DP_LINK_TRAIN_OFF_CPT;
2529 break;
2530 case DP_TRAINING_PATTERN_1:
2531 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2532 break;
2533 case DP_TRAINING_PATTERN_2:
2534 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2535 break;
2536 case DP_TRAINING_PATTERN_3:
2537 DRM_ERROR("DP training pattern 3 not supported\n");
2538 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2539 break;
2540 }
2541
2542 } else {
2543 if (IS_CHERRYVIEW(dev))
2544 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2545 else
2546 *DP &= ~DP_LINK_TRAIN_MASK;
2547
2548 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2549 case DP_TRAINING_PATTERN_DISABLE:
2550 *DP |= DP_LINK_TRAIN_OFF;
2551 break;
2552 case DP_TRAINING_PATTERN_1:
2553 *DP |= DP_LINK_TRAIN_PAT_1;
2554 break;
2555 case DP_TRAINING_PATTERN_2:
2556 *DP |= DP_LINK_TRAIN_PAT_2;
2557 break;
2558 case DP_TRAINING_PATTERN_3:
2559 if (IS_CHERRYVIEW(dev)) {
2560 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2561 } else {
2562 DRM_ERROR("DP training pattern 3 not supported\n");
2563 *DP |= DP_LINK_TRAIN_PAT_2;
2564 }
2565 break;
2566 }
2567 }
2568}
2569
2570static void intel_dp_enable_port(struct intel_dp *intel_dp)
2571{
2572 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2573 struct drm_i915_private *dev_priv = dev->dev_private;
2574
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002575 /* enable with pattern 1 (as per spec) */
2576 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2577 DP_TRAINING_PATTERN_1);
2578
2579 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2580 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b713f52014-10-16 21:27:35 +03002581
2582 /*
2583 * Magic for VLV/CHV. We _must_ first set up the register
2584 * without actually enabling the port, and then do another
2585 * write to enable the port. Otherwise link training will
2586 * fail when the power sequencer is freshly used for this port.
2587 */
2588 intel_dp->DP |= DP_PORT_EN;
2589
2590 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2591 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002592}
2593
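/*
 * Common non-DDI DP enable path: set up the panel power sequencer on
 * VLV/CHV, write the port with training pattern 1, power up the panel,
 * wait for the PHY lanes to come ready on VLV/CHV, wake the sink, run
 * link training and finally enable audio if the configuration carries it.
 */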
Daniel Vettere8cb4552012-07-01 13:05:48 +02002594static void intel_enable_dp(struct intel_encoder *encoder)
Jesse Barnesd240f202010-08-13 15:43:26 -07002595{
Daniel Vettere8cb4552012-07-01 13:05:48 +02002596 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2597 struct drm_device *dev = encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002598 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulac1dec792014-10-27 16:26:56 +02002599 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Chris Wilsonea5b2132010-08-04 13:50:23 +01002600 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002601 enum port port = dp_to_dig_port(intel_dp)->port;
2602 enum pipe pipe = crtc->pipe;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002603
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02002604 if (WARN_ON(dp_reg & DP_PORT_EN))
2605 return;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002606
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002607 pps_lock(intel_dp);
2608
2609 if (IS_VALLEYVIEW(dev))
2610 vlv_init_panel_power_sequencer(intel_dp);
2611
Ville Syrjälä7b13b582014-08-18 22:16:08 +03002612 intel_dp_enable_port(intel_dp);
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002613
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002614 if (port == PORT_A && IS_GEN5(dev_priv)) {
2615 /*
2616 * Underrun reporting for the other pipe was disabled in
2617 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2618 * enabled, so it's now safe to re-enable underrun reporting.
2619 */
2620 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2621 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2622 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2623 }
2624
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002625 edp_panel_vdd_on(intel_dp);
2626 edp_panel_on(intel_dp);
2627 edp_panel_vdd_off(intel_dp, true);
2628
2629 pps_unlock(intel_dp);
2630
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002631 if (IS_VALLEYVIEW(dev)) {
2632 unsigned int lane_mask = 0x0;
2633
2634 if (IS_CHERRYVIEW(dev))
2635 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2636
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03002637 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2638 lane_mask);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002639 }
Ville Syrjälä61234fa2014-10-16 21:27:34 +03002640
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002641 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2642 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03002643 intel_dp_stop_link_train(intel_dp);
Jani Nikulac1dec792014-10-27 16:26:56 +02002644
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02002645 if (crtc->config->has_audio) {
Jani Nikulac1dec792014-10-27 16:26:56 +02002646 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002647 pipe_name(pipe));
Jani Nikulac1dec792014-10-27 16:26:56 +02002648 intel_audio_codec_enable(encoder);
2649 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002650}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002651
Jani Nikulaecff4f32013-09-06 07:38:29 +03002652static void g4x_enable_dp(struct intel_encoder *encoder)
2653{
Jani Nikula828f5c62013-09-05 16:44:45 +03002654 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655
Jani Nikulaecff4f32013-09-06 07:38:29 +03002656 intel_enable_dp(encoder);
Daniel Vetter4be73782014-01-17 14:39:48 +01002657 intel_edp_backlight_on(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002658}
Jesse Barnes89b667f2013-04-18 14:51:36 -07002659
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002660static void vlv_enable_dp(struct intel_encoder *encoder)
2661{
Jani Nikula828f5c62013-09-05 16:44:45 +03002662 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2663
Daniel Vetter4be73782014-01-17 14:39:48 +01002664 intel_edp_backlight_on(intel_dp);
Rodrigo Vivib32c6f42014-11-20 03:44:37 -08002665 intel_psr_enable(intel_dp);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002666}
2667
Jani Nikulaecff4f32013-09-06 07:38:29 +03002668static void g4x_pre_enable_dp(struct intel_encoder *encoder)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002669{
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002670 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
Daniel Vetter2bd2ad62012-09-06 22:15:41 +02002671 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002672 enum port port = dp_to_dig_port(intel_dp)->port;
2673 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002674
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002675 intel_dp_prepare(encoder);
2676
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002677 if (port == PORT_A && IS_GEN5(dev_priv)) {
2678 /*
2679 * We get FIFO underruns on the other pipe when
2680	 * enabling the CPU eDP PLL, and when enabling the CPU
2681	 * eDP port. We could potentially avoid the PLL
2682 * underrun with a vblank wait just prior to enabling
2683 * the PLL, but that doesn't appear to help the port
2684 * enable case. Just sweep it all under the rug.
2685 */
2686 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2687 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2688 }
2689
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002690 /* Only ilk+ has port A */
Ville Syrjäläd6fbdd12015-10-29 21:25:58 +02002691 if (port == PORT_A) {
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002692 ironlake_set_pll_cpu_edp(intel_dp);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002693 ironlake_edp_pll_on(intel_dp);
Daniel Vetterd41f1ef2014-04-24 23:54:53 +02002694 }
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002695}
2696
Ville Syrjälä83b84592014-10-16 21:29:51 +03002697static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2698{
2699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2700 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2701 enum pipe pipe = intel_dp->pps_pipe;
2702 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2703
2704 edp_panel_vdd_off_sync(intel_dp);
2705
2706 /*
2707	 * VLV seems to get confused when multiple power sequencers
2708	 * have the same port selected (even if only one has power/vdd
2709	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2710	 * CHV, on the other hand, doesn't seem to mind having the same port
2711	 * selected in multiple power sequencers, but let's always clear the
2712	 * port select when logically disconnecting a power sequencer
2713	 * from a port.
2714 */
2715 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2716 pipe_name(pipe), port_name(intel_dig_port->port));
2717 I915_WRITE(pp_on_reg, 0);
2718 POSTING_READ(pp_on_reg);
2719
2720 intel_dp->pps_pipe = INVALID_PIPE;
2721}
2722
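/*
 * Detach the given pipe's panel power sequencer from whichever eDP port
 * currently owns it so that it can be reassigned. Must be called with
 * pps_mutex held.
 */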
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002723static void vlv_steal_power_sequencer(struct drm_device *dev,
2724 enum pipe pipe)
2725{
2726 struct drm_i915_private *dev_priv = dev->dev_private;
2727 struct intel_encoder *encoder;
2728
2729 lockdep_assert_held(&dev_priv->pps_mutex);
2730
Ville Syrjäläac3c12e2014-10-16 21:29:56 +03002731 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2732 return;
2733
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002734 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2735 base.head) {
2736 struct intel_dp *intel_dp;
Ville Syrjälä773538e82014-09-04 14:54:56 +03002737 enum port port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002738
2739 if (encoder->type != INTEL_OUTPUT_EDP)
2740 continue;
2741
2742 intel_dp = enc_to_intel_dp(&encoder->base);
Ville Syrjälä773538e82014-09-04 14:54:56 +03002743 port = dp_to_dig_port(intel_dp)->port;
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002744
2745 if (intel_dp->pps_pipe != pipe)
2746 continue;
2747
2748 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
Ville Syrjälä773538e82014-09-04 14:54:56 +03002749 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002750
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02002751 WARN(encoder->base.crtc,
Ville Syrjälä034e43c2014-10-16 21:27:28 +03002752 "stealing pipe %c power sequencer from active eDP port %c\n",
2753 pipe_name(pipe), port_name(port));
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002754
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002755 /* make sure vdd is off before we steal it */
Ville Syrjälä83b84592014-10-16 21:29:51 +03002756 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002757 }
2758}
2759
2760static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2761{
2762 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2763 struct intel_encoder *encoder = &intel_dig_port->base;
2764 struct drm_device *dev = encoder->base.dev;
2765 struct drm_i915_private *dev_priv = dev->dev_private;
2766 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002767
2768 lockdep_assert_held(&dev_priv->pps_mutex);
2769
Ville Syrjälä093e3f12014-10-16 21:27:33 +03002770 if (!is_edp(intel_dp))
2771 return;
2772
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002773 if (intel_dp->pps_pipe == crtc->pipe)
2774 return;
2775
2776 /*
2777 * If another power sequencer was being used on this
2778	 * port previously, make sure to turn off vdd there while
2779 * we still have control of it.
2780 */
2781 if (intel_dp->pps_pipe != INVALID_PIPE)
Ville Syrjälä83b84592014-10-16 21:29:51 +03002782 vlv_detach_power_sequencer(intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002783
2784 /*
2785 * We may be stealing the power
2786 * sequencer from another port.
2787 */
2788 vlv_steal_power_sequencer(dev, crtc->pipe);
2789
2790 /* now it's all ours */
2791 intel_dp->pps_pipe = crtc->pipe;
2792
2793 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2794 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2795
2796 /* init power sequencer on this pipe and port */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03002797 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2798 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03002799}
2800
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002801static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2802{
2803 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2804 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Jesse Barnesb2634012013-03-28 09:55:40 -07002805 struct drm_device *dev = encoder->base.dev;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002806 struct drm_i915_private *dev_priv = dev->dev_private;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002807 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002808 enum dpio_channel port = vlv_dport_to_channel(dport);
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002809 int pipe = intel_crtc->pipe;
2810 u32 val;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002811
Ville Syrjäläa5805162015-05-26 20:42:30 +03002812 mutex_lock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002813
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002814 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002815 val = 0;
2816 if (pipe)
2817 val |= (1<<21);
2818 else
2819 val &= ~(1<<21);
2820 val |= 0x001000c4;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002821 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2822 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002824
Ville Syrjäläa5805162015-05-26 20:42:30 +03002825 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002826
Jani Nikulaab1f90f2013-07-30 12:20:30 +03002827 intel_enable_dp(encoder);
Jesse Barnes89b667f2013-04-18 14:51:36 -07002828}
2829
Jani Nikulaecff4f32013-09-06 07:38:29 +03002830static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
Jesse Barnes89b667f2013-04-18 14:51:36 -07002831{
2832 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2833 struct drm_device *dev = encoder->base.dev;
2834 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002835 struct intel_crtc *intel_crtc =
2836 to_intel_crtc(encoder->base.crtc);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08002837 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08002838 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07002839
Daniel Vetter8ac33ed2014-04-24 23:54:54 +02002840 intel_dp_prepare(encoder);
2841
Jesse Barnes89b667f2013-04-18 14:51:36 -07002842 /* Program Tx lane resets to default */
Ville Syrjäläa5805162015-05-26 20:42:30 +03002843 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002844 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002845 DPIO_PCS_TX_LANE2_RESET |
2846 DPIO_PCS_TX_LANE1_RESET);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002847 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
Jesse Barnes89b667f2013-04-18 14:51:36 -07002848 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2849 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2850 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2851 DPIO_PCS_CLK_SOFT_RESET);
2852
2853 /* Fix up inter-pair skew failure */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08002854 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2855 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2856 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03002857 mutex_unlock(&dev_priv->sb_lock);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07002858}
2859
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002860static void chv_pre_enable_dp(struct intel_encoder *encoder)
2861{
2862 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2863 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2864 struct drm_device *dev = encoder->base.dev;
2865 struct drm_i915_private *dev_priv = dev->dev_private;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002866 struct intel_crtc *intel_crtc =
2867 to_intel_crtc(encoder->base.crtc);
2868 enum dpio_channel ch = vlv_dport_to_channel(dport);
2869 int pipe = intel_crtc->pipe;
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002870 int data, i, stagger;
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002871 u32 val;
2872
Ville Syrjäläa5805162015-05-26 20:42:30 +03002873 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä949c1d42014-04-09 13:28:58 +03002874
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002875 /* allow hardware to manage TX FIFO reset source */
2876 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2877 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2878 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2879
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002880 if (intel_crtc->config->lane_count > 2) {
2881 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2882 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2883 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2884 }
Ville Syrjälä570e2a72014-08-18 14:42:46 +03002885
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002886	 /* Program Tx lane latency optimal setting */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002887 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002888 /* Set the upar bit */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002889 if (intel_crtc->config->lane_count == 1)
2890 data = 0x0;
2891 else
2892 data = (i == 1) ? 0x0 : 0x1;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002893 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2894 data << DPIO_UPAR_SHIFT);
2895 }
2896
2897 /* Data lane stagger programming */
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002898 if (intel_crtc->config->port_clock > 270000)
2899 stagger = 0x18;
2900 else if (intel_crtc->config->port_clock > 135000)
2901 stagger = 0xd;
2902 else if (intel_crtc->config->port_clock > 67500)
2903 stagger = 0x7;
2904 else if (intel_crtc->config->port_clock > 33750)
2905 stagger = 0x4;
2906 else
2907 stagger = 0x2;
2908
2909 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2910 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2911 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2912
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002913 if (intel_crtc->config->lane_count > 2) {
2914 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2915 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2916 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2917 }
Ville Syrjälä2e523e92015-04-10 18:21:27 +03002918
2919 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2920 DPIO_LANESTAGGER_STRAP(stagger) |
2921 DPIO_LANESTAGGER_STRAP_OVRD |
2922 DPIO_TX1_STAGGER_MASK(0x1f) |
2923 DPIO_TX1_STAGGER_MULT(6) |
2924 DPIO_TX2_STAGGER_MULT(0));
2925
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002926 if (intel_crtc->config->lane_count > 2) {
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2928 DPIO_LANESTAGGER_STRAP(stagger) |
2929 DPIO_LANESTAGGER_STRAP_OVRD |
2930 DPIO_TX1_STAGGER_MASK(0x1f) |
2931 DPIO_TX1_STAGGER_MULT(7) |
2932 DPIO_TX2_STAGGER_MULT(5));
2933 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002934
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002935 /* Deassert data lane reset */
2936 chv_data_lane_soft_reset(encoder, false);
2937
Ville Syrjäläa5805162015-05-26 20:42:30 +03002938 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002939
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002940 intel_enable_dp(encoder);
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002941
2942 /* Second common lane will stay alive on its own now */
2943 if (dport->release_cl2_override) {
2944 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2945 dport->release_cl2_override = false;
2946 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03002947}
2948
Ville Syrjälä9197c882014-04-09 13:29:05 +03002949static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2950{
2951 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2952 struct drm_device *dev = encoder->base.dev;
2953 struct drm_i915_private *dev_priv = dev->dev_private;
2954 struct intel_crtc *intel_crtc =
2955 to_intel_crtc(encoder->base.crtc);
2956 enum dpio_channel ch = vlv_dport_to_channel(dport);
2957 enum pipe pipe = intel_crtc->pipe;
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002958 unsigned int lane_mask =
2959 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002960 u32 val;
2961
Ville Syrjälä625695f2014-06-28 02:04:02 +03002962 intel_dp_prepare(encoder);
2963
Ville Syrjäläb0b33842015-07-08 23:45:55 +03002964 /*
2965 * Must trick the second common lane into life.
2966 * Otherwise we can't even access the PLL.
2967 */
2968 if (ch == DPIO_CH0 && pipe == PIPE_B)
2969 dport->release_cl2_override =
2970 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2971
Ville Syrjäläe0fce782015-07-08 23:45:54 +03002972 chv_phy_powergate_lanes(encoder, true, lane_mask);
2973
Ville Syrjäläa5805162015-05-26 20:42:30 +03002974 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03002975
Ville Syrjäläa8f327f2015-07-09 20:14:11 +03002976 /* Assert data lane reset */
2977 chv_data_lane_soft_reset(encoder, true);
2978
Ville Syrjäläb9e5ac32014-05-27 16:30:18 +03002979 /* program left/right clock distribution */
2980 if (pipe != PIPE_B) {
2981 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2982 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2983 if (ch == DPIO_CH0)
2984 val |= CHV_BUFLEFTENA1_FORCE;
2985 if (ch == DPIO_CH1)
2986 val |= CHV_BUFRIGHTENA1_FORCE;
2987 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2988 } else {
2989 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2990 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2991 if (ch == DPIO_CH0)
2992 val |= CHV_BUFLEFTENA2_FORCE;
2993 if (ch == DPIO_CH1)
2994 val |= CHV_BUFRIGHTENA2_FORCE;
2995 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2996 }
2997
Ville Syrjälä9197c882014-04-09 13:29:05 +03002998 /* program clock channel usage */
2999 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3000 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3001 if (pipe != PIPE_B)
3002 val &= ~CHV_PCS_USEDCLKCHANNEL;
3003 else
3004 val |= CHV_PCS_USEDCLKCHANNEL;
3005 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3006
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003007 if (intel_crtc->config->lane_count > 2) {
3008 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3009 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3010 if (pipe != PIPE_B)
3011 val &= ~CHV_PCS_USEDCLKCHANNEL;
3012 else
3013 val |= CHV_PCS_USEDCLKCHANNEL;
3014 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3015 }
Ville Syrjälä9197c882014-04-09 13:29:05 +03003016
3017 /*
3018	 * This is a bit weird since generally CL
3019 * matches the pipe, but here we need to
3020 * pick the CL based on the port.
3021 */
3022 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3023 if (pipe != PIPE_B)
3024 val &= ~CHV_CMN_USEDCLKCHANNEL;
3025 else
3026 val |= CHV_CMN_USEDCLKCHANNEL;
3027 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3028
Ville Syrjäläa5805162015-05-26 20:42:30 +03003029 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä9197c882014-04-09 13:29:05 +03003030}
3031
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003032static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3033{
3034 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3035 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3036 u32 val;
3037
3038 mutex_lock(&dev_priv->sb_lock);
3039
3040 /* disable left/right clock distribution */
3041 if (pipe != PIPE_B) {
3042 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3043 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3044 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3045 } else {
3046 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3047 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3048 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3049 }
3050
3051 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003052
Ville Syrjäläb0b33842015-07-08 23:45:55 +03003053 /*
3054 * Leave the power down bit cleared for at least one
3055	 * lane so that chv_phy_powergate_ch() will power
3056 * on something when the channel is otherwise unused.
3057 * When the port is off and the override is removed
3058 * the lanes power down anyway, so otherwise it doesn't
3059 * really matter what the state of power down bits is
3060 * after this.
3061 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003062 chv_phy_powergate_lanes(encoder, false, 0x0);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03003063}
3064
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003065/*
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003066 * Native read with retry for link status and receiver capability reads for
3067 * cases where the sink may still be asleep.
Jani Nikula9d1a1032014-03-14 16:51:15 +02003068 *
3069 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3070 * supposed to retry 3 times per the spec.
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003071 */
Jani Nikula9d1a1032014-03-14 16:51:15 +02003072static ssize_t
3073intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3074 void *buffer, size_t size)
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003075{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003076 ssize_t ret;
3077 int i;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003078
Ville Syrjäläf6a19062014-10-16 20:46:09 +03003079 /*
3080	 * Sometimes we just get the same incorrect byte repeated
3081	 * over the entire buffer. Doing just one throw-away read
3082 * initially seems to "solve" it.
3083 */
3084 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3085
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003086 for (i = 0; i < 3; i++) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003087 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3088 if (ret == size)
3089 return ret;
Jesse Barnesdf0c2372011-07-07 11:11:02 -07003090 msleep(1);
3091 }
3092
Jani Nikula9d1a1032014-03-14 16:51:15 +02003093 return ret;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003094}
3095
3096/*
3097 * Fetch AUX CH registers 0x202 - 0x207 which contain
3098 * link status information
3099 */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003100bool
Keith Packard93f62da2011-11-01 19:45:03 -07003101intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003102{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003103 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3104 DP_LANE0_1_STATUS,
3105 link_status,
3106 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003107}
3108
Paulo Zanoni11002442014-06-13 18:45:41 -03003109/* These are source-specific values. */
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003110uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003111intel_dp_voltage_max(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003112{
Paulo Zanoni30add222012-10-26 19:05:45 -02003113 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303114 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003115 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003116
Vandana Kannan93147262014-11-18 15:45:29 +05303117 if (IS_BROXTON(dev))
3118 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3119 else if (INTEL_INFO(dev)->gen >= 9) {
Sonika Jindal9e458032015-05-06 17:35:48 +05303120 if (dev_priv->edp_low_vswing && port == PORT_A)
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303121 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003122 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303123 } else if (IS_VALLEYVIEW(dev))
Sonika Jindalbd600182014-08-08 16:23:41 +05303124 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003125 else if (IS_GEN7(dev) && port == PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303126 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003127 else if (HAS_PCH_CPT(dev) && port != PORT_A)
Sonika Jindalbd600182014-08-08 16:23:41 +05303128 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
Keith Packard1a2eb462011-11-16 16:26:07 -08003129 else
Sonika Jindalbd600182014-08-08 16:23:41 +05303130 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
Keith Packard1a2eb462011-11-16 16:26:07 -08003131}
3132
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003133uint8_t
Keith Packard1a2eb462011-11-16 16:26:07 -08003134intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3135{
Paulo Zanoni30add222012-10-26 19:05:45 -02003136 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003137 enum port port = dp_to_dig_port(intel_dp)->port;
Keith Packard1a2eb462011-11-16 16:26:07 -08003138
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003139 if (INTEL_INFO(dev)->gen >= 9) {
3140 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3146 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Sonika Jindal7ad14a22015-02-25 10:29:12 +05303147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3148 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Damien Lespiau5a9d1f12013-12-03 13:56:26 +00003149 default:
3150 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3151 }
3152 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003153 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3155 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3156 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3157 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003161 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303162 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Paulo Zanonid6c0d722012-10-15 15:51:34 -03003163 }
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003164 } else if (IS_VALLEYVIEW(dev)) {
3165 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3167 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3169 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3171 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003173 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303174 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003175 }
Imre Deakbc7d38a2013-05-16 14:40:36 +03003176 } else if (IS_GEN7(dev) && port == PORT_A) {
Keith Packard1a2eb462011-11-16 16:26:07 -08003177 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3180 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3181 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3182 return DP_TRAIN_PRE_EMPH_LEVEL_1;
Keith Packard1a2eb462011-11-16 16:26:07 -08003183 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303184 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003185 }
3186 } else {
3187 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3189 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3191 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3193 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packard1a2eb462011-11-16 16:26:07 -08003195 default:
Sonika Jindalbd600182014-08-08 16:23:41 +05303196 return DP_TRAIN_PRE_EMPH_LEVEL_0;
Keith Packard1a2eb462011-11-16 16:26:07 -08003197 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003198 }
3199}
3200
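/*
 * Translate the voltage swing / pre-emphasis request in train_set into VLV
 * DPIO de-emphasis, pre-emphasis and unique transition scale values and
 * program them through the sideband. Returns 0 because the levels are set
 * directly via DPIO rather than encoded into the DP port register.
 */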
Daniel Vetter5829975c2015-04-16 11:36:52 +02003201static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003202{
3203 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3204 struct drm_i915_private *dev_priv = dev->dev_private;
3205 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003206 struct intel_crtc *intel_crtc =
3207 to_intel_crtc(dport->base.base.crtc);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003208 unsigned long demph_reg_value, preemph_reg_value,
3209 uniqtranscale_reg_value;
3210 uint8_t train_set = intel_dp->train_set[0];
Chon Ming Leee4607fc2013-11-06 14:36:35 +08003211 enum dpio_channel port = vlv_dport_to_channel(dport);
Chon Ming Lee5e69f972013-09-05 20:41:49 +08003212 int pipe = intel_crtc->pipe;
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003213
3214 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303215 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003216 preemph_reg_value = 0x0004000;
3217 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303218 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003219 demph_reg_value = 0x2B405555;
3220 uniqtranscale_reg_value = 0x552AB83A;
3221 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003223 demph_reg_value = 0x2B404040;
3224 uniqtranscale_reg_value = 0x5548B83A;
3225 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003227 demph_reg_value = 0x2B245555;
3228 uniqtranscale_reg_value = 0x5560B83A;
3229 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003231 demph_reg_value = 0x2B405555;
3232 uniqtranscale_reg_value = 0x5598DA3A;
3233 break;
3234 default:
3235 return 0;
3236 }
3237 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303238 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003239 preemph_reg_value = 0x0002000;
3240 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003242 demph_reg_value = 0x2B404040;
3243 uniqtranscale_reg_value = 0x5552B83A;
3244 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303245 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003246 demph_reg_value = 0x2B404848;
3247 uniqtranscale_reg_value = 0x5580B83A;
3248 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003250 demph_reg_value = 0x2B404040;
3251 uniqtranscale_reg_value = 0x55ADDA3A;
3252 break;
3253 default:
3254 return 0;
3255 }
3256 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303257 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003258 preemph_reg_value = 0x0000000;
3259 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003261 demph_reg_value = 0x2B305555;
3262 uniqtranscale_reg_value = 0x5570B83A;
3263 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003265 demph_reg_value = 0x2B2B4040;
3266 uniqtranscale_reg_value = 0x55ADDA3A;
3267 break;
3268 default:
3269 return 0;
3270 }
3271 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303272 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003273 preemph_reg_value = 0x0006000;
3274 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003276 demph_reg_value = 0x1B405555;
3277 uniqtranscale_reg_value = 0x55ADDA3A;
3278 break;
3279 default:
3280 return 0;
3281 }
3282 break;
3283 default:
3284 return 0;
3285 }
3286
Ville Syrjäläa5805162015-05-26 20:42:30 +03003287 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003288 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3289 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3290 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003291 uniqtranscale_reg_value);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08003292 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3293 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3294 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3295 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03003296 mutex_unlock(&dev_priv->sb_lock);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003297
3298 return 0;
3299}
3300
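/*
 * The unique transition scale is only needed for the maximum voltage swing
 * with no pre-emphasis, i.e. train_set == (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
 * DP_TRAIN_PRE_EMPH_LEVEL_0).
 */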
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003301static bool chv_need_uniq_trans_scale(uint8_t train_set)
3302{
3303 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3304 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3305}
3306
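/*
 * CHV counterpart of vlv_signal_levels(): convert train_set into DPIO
 * de-emphasis and swing margin values, program them per active lane and
 * kick off the swing calculation.
 */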
Daniel Vetter5829975c2015-04-16 11:36:52 +02003307static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003308{
3309 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3310 struct drm_i915_private *dev_priv = dev->dev_private;
3311 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3312 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003313 u32 deemph_reg_value, margin_reg_value, val;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003314 uint8_t train_set = intel_dp->train_set[0];
3315 enum dpio_channel ch = vlv_dport_to_channel(dport);
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003316 enum pipe pipe = intel_crtc->pipe;
3317 int i;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003318
3319 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303320 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003321 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003323 deemph_reg_value = 128;
3324 margin_reg_value = 52;
3325 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003327 deemph_reg_value = 128;
3328 margin_reg_value = 77;
3329 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003331 deemph_reg_value = 128;
3332 margin_reg_value = 102;
3333 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003335 deemph_reg_value = 128;
3336 margin_reg_value = 154;
3337 /* FIXME extra to set for 1200 */
3338 break;
3339 default:
3340 return 0;
3341 }
3342 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303343 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003344 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003346 deemph_reg_value = 85;
3347 margin_reg_value = 78;
3348 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003350 deemph_reg_value = 85;
3351 margin_reg_value = 116;
3352 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003354 deemph_reg_value = 85;
3355 margin_reg_value = 154;
3356 break;
3357 default:
3358 return 0;
3359 }
3360 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303361 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003362 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003364 deemph_reg_value = 64;
3365 margin_reg_value = 104;
3366 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303367 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003368 deemph_reg_value = 64;
3369 margin_reg_value = 154;
3370 break;
3371 default:
3372 return 0;
3373 }
3374 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303375 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003376 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003378 deemph_reg_value = 43;
3379 margin_reg_value = 154;
3380 break;
3381 default:
3382 return 0;
3383 }
3384 break;
3385 default:
3386 return 0;
3387 }
3388
Ville Syrjäläa5805162015-05-26 20:42:30 +03003389 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003390
3391 /* Clear calc init */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003392 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3393 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003394 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3395 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
Ville Syrjälä1966e592014-04-09 13:29:04 +03003396 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3397
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003398 if (intel_crtc->config->lane_count > 2) {
3399 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3400 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3401 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3402 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3403 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3404 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003405
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003406 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3407 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3408 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3409 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3410
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003411 if (intel_crtc->config->lane_count > 2) {
3412 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3413 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3414 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3416 }
Ville Syrjäläa02ef3c2014-08-18 14:42:45 +03003417
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003418 /* Program swing deemph */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003419 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003420 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3421 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3422 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3423 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3424 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003425
3426 /* Program swing margin */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003427 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003428 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003429
Ville Syrjälä1fb44502014-06-28 02:04:03 +03003430 val &= ~DPIO_SWING_MARGIN000_MASK;
3431 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003432
3433 /*
3434 * Supposedly this value shouldn't matter when unique transition
3435 * scale is disabled, but in fact it does matter. Let's just
3436 * always program the same value and hope it's OK.
3437 */
3438 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3439 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3440
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003441 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3442 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003443
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003444 /*
3445	 * The documentation says to set bit 27 for ch0 and bit 26 for
3446	 * ch1, which might be a typo in the doc.
3447	 * For now, set bit 27 for both ch0 and ch1 when selecting this
3448	 * unique transition scale.
3449 */
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003450 for (i = 0; i < intel_crtc->config->lane_count; i++) {
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003451 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003452 if (chv_need_uniq_trans_scale(train_set))
Ville Syrjäläf72df8d2014-04-09 13:29:03 +03003453 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
Ville Syrjälä67fa24b2015-07-08 23:45:48 +03003454 else
3455 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3456 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003457 }
3458
3459 /* Start swing calculation */
Ville Syrjälä1966e592014-04-09 13:29:04 +03003460 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3461 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3462 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3463
Ville Syrjäläe0fce782015-07-08 23:45:54 +03003464 if (intel_crtc->config->lane_count > 2) {
3465 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3466 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3467 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3468 }
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003469
Ville Syrjäläa5805162015-05-26 20:42:30 +03003470 mutex_unlock(&dev_priv->sb_lock);
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003471
3472 return 0;
3473}
3474
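/*
 * Voltage swing and pre-emphasis control for the gen4 style DP port
 * register (also used as the fallback on later non-DDI platforms).
 */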
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003475static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003476gen4_signal_levels(uint8_t train_set)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003477{
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003478 uint32_t signal_levels = 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003479
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003480 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003482 default:
3483 signal_levels |= DP_VOLTAGE_0_4;
3484 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003486 signal_levels |= DP_VOLTAGE_0_6;
3487 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003489 signal_levels |= DP_VOLTAGE_0_8;
3490 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003492 signal_levels |= DP_VOLTAGE_1_2;
3493 break;
3494 }
Chris Wilson3cf2efb2010-11-29 10:09:55 +00003495 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303496 case DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003497 default:
3498 signal_levels |= DP_PRE_EMPHASIS_0;
3499 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303500 case DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003501 signal_levels |= DP_PRE_EMPHASIS_3_5;
3502 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303503 case DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003504 signal_levels |= DP_PRE_EMPHASIS_6;
3505 break;
Sonika Jindalbd600182014-08-08 16:23:41 +05303506 case DP_TRAIN_PRE_EMPH_LEVEL_3:
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003507 signal_levels |= DP_PRE_EMPHASIS_9_5;
3508 break;
3509 }
3510 return signal_levels;
3511}
3512
Zhenyu Wange3421a12010-04-08 09:43:27 +08003513/* Gen6's DP voltage swing and pre-emphasis control */
3514static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003515gen6_edp_signal_levels(uint8_t train_set)
Zhenyu Wange3421a12010-04-08 09:43:27 +08003516{
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003517 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3518 DP_TRAIN_PRE_EMPHASIS_MASK);
3519 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303520 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3521 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003522 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303523 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003524 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303525 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3526 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003527 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303528 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3529 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003530 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
Sonika Jindalbd600182014-08-08 16:23:41 +05303531 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3532 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003533 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003534 default:
Yuanhan Liu3c5a62b2011-01-06 18:26:08 +08003535		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3536			      "0x%x\n", signal_levels);
3537 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003538 }
3539}
3540
Keith Packard1a2eb462011-11-16 16:26:07 -08003541/* Gen7's DP voltage swing and pre-emphasis control */
3542static uint32_t
Daniel Vetter5829975c2015-04-16 11:36:52 +02003543gen7_edp_signal_levels(uint8_t train_set)
Keith Packard1a2eb462011-11-16 16:26:07 -08003544{
3545 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3546 DP_TRAIN_PRE_EMPHASIS_MASK);
3547 switch (signal_levels) {
Sonika Jindalbd600182014-08-08 16:23:41 +05303548 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003549 return EDP_LINK_TRAIN_400MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303550 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003551 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303552 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
Keith Packard1a2eb462011-11-16 16:26:07 -08003553 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3554
Sonika Jindalbd600182014-08-08 16:23:41 +05303555 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003556 return EDP_LINK_TRAIN_600MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303557 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003558 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3559
Sonika Jindalbd600182014-08-08 16:23:41 +05303560 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
Keith Packard1a2eb462011-11-16 16:26:07 -08003561 return EDP_LINK_TRAIN_800MV_0DB_IVB;
Sonika Jindalbd600182014-08-08 16:23:41 +05303562 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
Keith Packard1a2eb462011-11-16 16:26:07 -08003563 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3564
3565 default:
3566		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3567			      "0x%x\n", signal_levels);
3568 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3569 }
3570}
3571
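/*
 * Translate the voltage swing / pre-emphasis requested in
 * intel_dp->train_set[0] into the platform specific register encoding and
 * write it to the port register.  Typical use from a link training loop
 * (illustrative sketch only):
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *
 *	intel_dp_get_link_status(intel_dp, link_status);
 *	... update intel_dp->train_set[] from the sink's requested values ...
 *	intel_dp_set_signal_levels(intel_dp);
 */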
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003572void
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003573intel_dp_set_signal_levels(struct intel_dp *intel_dp)
Paulo Zanonif0a34242012-12-06 16:51:50 -02003574{
3575 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003576 enum port port = intel_dig_port->port;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003577 struct drm_device *dev = intel_dig_port->base.base.dev;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003578 struct drm_i915_private *dev_priv = to_i915(dev);
David Weinehallf8896f52015-06-25 11:11:03 +03003579 uint32_t signal_levels, mask = 0;
Paulo Zanonif0a34242012-12-06 16:51:50 -02003580 uint8_t train_set = intel_dp->train_set[0];
3581
David Weinehallf8896f52015-06-25 11:11:03 +03003582 if (HAS_DDI(dev)) {
3583 signal_levels = ddi_signal_levels(intel_dp);
3584
3585 if (IS_BROXTON(dev))
3586 signal_levels = 0;
3587 else
3588 mask = DDI_BUF_EMP_MASK;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03003589 } else if (IS_CHERRYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003590 signal_levels = chv_signal_levels(intel_dp);
Pallavi Ge2fa6fb2013-04-18 14:44:28 -07003591 } else if (IS_VALLEYVIEW(dev)) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003592 signal_levels = vlv_signal_levels(intel_dp);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003593 } else if (IS_GEN7(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003594 signal_levels = gen7_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003595 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
Imre Deakbc7d38a2013-05-16 14:40:36 +03003596 } else if (IS_GEN6(dev) && port == PORT_A) {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003597 signal_levels = gen6_edp_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003598 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3599 } else {
Daniel Vetter5829975c2015-04-16 11:36:52 +02003600 signal_levels = gen4_signal_levels(train_set);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003601 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3602 }
3603
Vandana Kannan96fb9f92014-11-18 15:45:27 +05303604 if (mask)
3605 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3606
3607 DRM_DEBUG_KMS("Using vswing level %d\n",
3608 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3609 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3610 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3611 DP_TRAIN_PRE_EMPHASIS_SHIFT);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003612
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003613 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
Ander Conselvan de Oliveirab905a912015-10-23 13:01:47 +03003614
3615 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3616 POSTING_READ(intel_dp->output_reg);
Paulo Zanonif0a34242012-12-06 16:51:50 -02003617}
3618
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003619void
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003620intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3621 uint8_t dp_train_pat)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003622{
Paulo Zanoni174edf12012-10-26 19:05:50 -02003623 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03003624 struct drm_i915_private *dev_priv =
3625 to_i915(intel_dig_port->base.base.dev);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003626
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003627 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
Paulo Zanoni47ea7542012-07-17 16:55:16 -03003628
Ander Conselvan de Oliveiraf4eb6922015-10-23 13:01:44 +03003629 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
Chris Wilsonea5b2132010-08-04 13:50:23 +01003630 POSTING_READ(intel_dp->output_reg);
Ander Conselvan de Oliveirae9c176d2015-10-23 13:01:45 +03003631}
3632
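/*
 * Switch a DDI port to transmitting the idle pattern and, except on PORT_A
 * (eDP), wait for the hardware to report idle done.  No-op on non-DDI
 * platforms.
 */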
Ander Conselvan de Oliveira94223d02015-10-23 13:01:48 +03003633void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
Imre Deak3ab9c632013-05-03 12:57:41 +03003634{
3635 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3636 struct drm_device *dev = intel_dig_port->base.base.dev;
3637 struct drm_i915_private *dev_priv = dev->dev_private;
3638 enum port port = intel_dig_port->port;
3639 uint32_t val;
3640
3641 if (!HAS_DDI(dev))
3642 return;
3643
3644 val = I915_READ(DP_TP_CTL(port));
3645 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3646 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3647 I915_WRITE(DP_TP_CTL(port), val);
3648
3649 /*
3650 * On PORT_A we can have only eDP in SST mode. There the only reason
3651 * we need to set idle transmission mode is to work around a HW issue
3652 * where we enable the pipe while not in idle link-training mode.
3653	 * In this case there is a requirement to wait for a minimum number of
3654 * idle patterns to be sent.
3655 */
3656 if (port == PORT_A)
3657 return;
3658
3659 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3660 1))
3661 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3662}
3663
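/*
 * Shut a non-DDI DP port down: drop to the idle training pattern, then clear
 * the port enable and audio bits.  On IBX the port is briefly re-enabled on
 * transcoder A with training pattern 1 to allow the matching HDMI port to be
 * enabled there, as described in the workaround comment below.
 */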
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003664static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01003665intel_dp_link_down(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003666{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003667 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003668 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
Imre Deakbc7d38a2013-05-16 14:40:36 +03003669 enum port port = intel_dig_port->port;
Paulo Zanonida63a9f2012-10-26 19:05:46 -02003670 struct drm_device *dev = intel_dig_port->base.base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003671 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsonea5b2132010-08-04 13:50:23 +01003672 uint32_t DP = intel_dp->DP;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003673
Daniel Vetterbc76e3202014-05-20 22:46:50 +02003674 if (WARN_ON(HAS_DDI(dev)))
Paulo Zanonic19b0662012-10-15 15:51:41 -03003675 return;
3676
Daniel Vetter0c33d8d2012-09-06 22:15:43 +02003677 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
Chris Wilson1b39d6f2010-12-06 11:20:45 +00003678 return;
3679
Zhao Yakui28c97732009-10-09 11:39:41 +08003680 DRM_DEBUG_KMS("\n");
Zhenyu Wang32f9d652009-07-24 01:00:32 +08003681
Ville Syrjälä39e5fa82015-05-05 17:17:29 +03003682 if ((IS_GEN7(dev) && port == PORT_A) ||
3683 (HAS_PCH_CPT(dev) && port != PORT_A)) {
Zhenyu Wange3421a12010-04-08 09:43:27 +08003684 DP &= ~DP_LINK_TRAIN_MASK_CPT;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003685 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003686 } else {
Ville Syrjäläaad3d142014-06-28 02:04:25 +03003687 if (IS_CHERRYVIEW(dev))
3688 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3689 else
3690 DP &= ~DP_LINK_TRAIN_MASK;
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003691 DP |= DP_LINK_TRAIN_PAT_IDLE;
Zhenyu Wange3421a12010-04-08 09:43:27 +08003692 }
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003693 I915_WRITE(intel_dp->output_reg, DP);
Chris Wilsonfe255d02010-09-11 21:37:48 +01003694 POSTING_READ(intel_dp->output_reg);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08003695
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003696 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3697 I915_WRITE(intel_dp->output_reg, DP);
3698 POSTING_READ(intel_dp->output_reg);
3699
3700 /*
3701 * HW workaround for IBX, we need to move the port
3702 * to transcoder A after disabling it to allow the
3703 * matching HDMI port to be enabled on transcoder A.
3704 */
3705 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003706 /*
3707 * We get CPU/PCH FIFO underruns on the other pipe when
3708 * doing the workaround. Sweep them under the rug.
3709 */
3710 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3711 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3712
Ville Syrjälä1612c8b2015-05-05 17:17:34 +03003713 /* always enable with pattern 1 (as per spec) */
3714 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3715 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3716 I915_WRITE(intel_dp->output_reg, DP);
3717 POSTING_READ(intel_dp->output_reg);
3718
3719 DP &= ~DP_PORT_EN;
Eric Anholt5bddd172010-11-18 09:32:59 +08003720 I915_WRITE(intel_dp->output_reg, DP);
Daniel Vetter0ca09682014-11-24 16:54:11 +01003721 POSTING_READ(intel_dp->output_reg);
Ville Syrjälä0c241d52015-10-30 19:23:22 +02003722
3723 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3724 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3725 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
Eric Anholt5bddd172010-11-18 09:32:59 +08003726 }
3727
Keith Packardf01eca22011-09-28 16:48:10 -07003728 msleep(intel_dp->panel_power_down_delay);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07003729}
3730
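/*
 * Read and cache the sink's DPCD: the receiver capability block, PSR/PSR2
 * capabilities for eDP, the eDP 1.4 supported link rate table and, for
 * branch devices, the per-port downstream info.  Returns true for a usable
 * sink, false if the DPCD (or the downstream port info) cannot be read.
 */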
Keith Packard26d61aa2011-07-25 20:01:09 -07003731static bool
3732intel_dp_get_dpcd(struct intel_dp *intel_dp)
Keith Packard92fd8fd2011-07-25 19:50:10 -07003733{
Rodrigo Vivia031d702013-10-03 16:15:06 -03003734 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3735 struct drm_device *dev = dig_port->base.base.dev;
3736 struct drm_i915_private *dev_priv = dev->dev_private;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303737 uint8_t rev;
Rodrigo Vivia031d702013-10-03 16:15:06 -03003738
Jani Nikula9d1a1032014-03-14 16:51:15 +02003739 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3740 sizeof(intel_dp->dpcd)) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003741 return false; /* aux transfer failed */
Keith Packard92fd8fd2011-07-25 19:50:10 -07003742
Andy Shevchenkoa8e98152014-09-01 14:12:01 +03003743 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
Damien Lespiau577c7a52012-12-13 16:09:02 +00003744
Adam Jacksonedb39242012-09-18 10:58:49 -04003745 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3746 return false; /* DPCD not present */
3747
Shobhit Kumar2293bb52013-07-11 18:44:56 -03003748 /* Check if the panel supports PSR */
3749 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
Jani Nikula50003932013-09-20 16:42:17 +03003750 if (is_edp(intel_dp)) {
Jani Nikula9d1a1032014-03-14 16:51:15 +02003751 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3752 intel_dp->psr_dpcd,
3753 sizeof(intel_dp->psr_dpcd));
Rodrigo Vivia031d702013-10-03 16:15:06 -03003754 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3755 dev_priv->psr.sink_support = true;
Jani Nikula50003932013-09-20 16:42:17 +03003756 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
Rodrigo Vivia031d702013-10-03 16:15:06 -03003757 }
Sonika Jindal474d1ec2015-04-02 11:02:44 +05303758
3759 if (INTEL_INFO(dev)->gen >= 9 &&
3760 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3761 uint8_t frame_sync_cap;
3762
3763 dev_priv->psr.sink_support = true;
3764 intel_dp_dpcd_read_wake(&intel_dp->aux,
3765 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3766 &frame_sync_cap, 1);
3767 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3768 /* PSR2 needs frame sync as well */
3769 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3770 DRM_DEBUG_KMS("PSR2 %s on sink",
3771 dev_priv->psr.psr2_support ? "supported" : "not supported");
3772 }
Jani Nikula50003932013-09-20 16:42:17 +03003773 }
3774
Jani Nikulabc5133d2015-09-03 11:16:07 +03003775 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
Ander Conselvan de Oliveirae588fa12015-10-23 13:01:50 +03003776 yesno(intel_dp_source_supports_hbr2(intel_dp)),
Jani Nikula742f4912015-09-03 11:16:09 +03003777 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
Todd Previte06ea66b2014-01-20 10:19:39 -07003778
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303779 /* Intermediate frequency support */
3780 if (is_edp(intel_dp) &&
3781 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3782 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3783 (rev >= 0x03)) { /* eDp v1.4 or higher */
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003784 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003785 int i;
3786
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303787 intel_dp_dpcd_read_wake(&intel_dp->aux,
3788 DP_SUPPORTED_LINK_RATES,
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003789 sink_rates,
3790 sizeof(sink_rates));
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003791
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003792 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3793 int val = le16_to_cpu(sink_rates[i]);
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003794
3795 if (val == 0)
3796 break;
3797
Sonika Jindalaf77b972015-05-07 13:59:28 +05303798 /* Value read is in kHz while drm clock is saved in deca-kHz */
3799 intel_dp->sink_rates[i] = (val * 200) / 10;
Ville Syrjäläea2d8a42015-03-12 17:10:28 +02003800 }
Ville Syrjälä94ca7192015-03-13 19:40:31 +02003801 intel_dp->num_sink_rates = i;
Sonika Jindalfc0f8e22015-03-05 10:03:58 +05303802 }
Ville Syrjälä0336400e2015-03-12 17:10:39 +02003803
3804 intel_dp_print_rates(intel_dp);
3805
Adam Jacksonedb39242012-09-18 10:58:49 -04003806 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3807 DP_DWN_STRM_PORT_PRESENT))
3808 return true; /* native DP sink */
3809
3810 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3811 return true; /* no per-port downstream info */
3812
Jani Nikula9d1a1032014-03-14 16:51:15 +02003813 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3814 intel_dp->downstream_ports,
3815 DP_MAX_DOWNSTREAM_PORTS) < 0)
Adam Jacksonedb39242012-09-18 10:58:49 -04003816 return false; /* downstream port status fetch failed */
3817
3818 return true;
Keith Packard92fd8fd2011-07-25 19:50:10 -07003819}
3820
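/* Read and log the sink and branch device OUIs, if the sink supports it. */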
Adam Jackson0d198322012-05-14 16:05:47 -04003821static void
3822intel_dp_probe_oui(struct intel_dp *intel_dp)
3823{
3824 u8 buf[3];
3825
3826 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3827 return;
3828
Jani Nikula9d1a1032014-03-14 16:51:15 +02003829 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003830 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3831 buf[0], buf[1], buf[2]);
3832
Jani Nikula9d1a1032014-03-14 16:51:15 +02003833 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
Adam Jackson0d198322012-05-14 16:05:47 -04003834 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3835 buf[0], buf[1], buf[2]);
3836}
3837
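/*
 * Check whether the sink is MST capable (DPCD 1.2+ with DP_MST_CAP set) and
 * enable or disable the MST topology manager accordingly.  Returns the new
 * MST state.
 */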
Dave Airlie0e32b392014-05-02 14:02:48 +10003838static bool
3839intel_dp_probe_mst(struct intel_dp *intel_dp)
3840{
3841 u8 buf[1];
3842
3843 if (!intel_dp->can_mst)
3844 return false;
3845
3846 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3847 return false;
3848
Dave Airlie0e32b392014-05-02 14:02:48 +10003849	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
3850 if (buf[0] & DP_MST_CAP) {
3851 DRM_DEBUG_KMS("Sink is MST capable\n");
3852 intel_dp->is_mst = true;
3853 } else {
3854 DRM_DEBUG_KMS("Sink is not MST capable\n");
3855 intel_dp->is_mst = false;
3856 }
3857 }
Dave Airlie0e32b392014-05-02 14:02:48 +10003858
3859 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3860 return intel_dp->is_mst;
3861}
3862
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003863static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003864{
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003865 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3866 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003867 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003868 int ret = 0;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003869
3870 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003871 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003872 ret = -EIO;
3873 goto out;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003874 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003875
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003876 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003877 buf & ~DP_TEST_SINK_START) < 0) {
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003878 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003879 ret = -EIO;
3880 goto out;
3881 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003882
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003883 intel_dp->sink_crc.started = false;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003884 out:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003885 hsw_enable_ips(intel_crtc);
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003886 return ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003887}
3888
3889static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3890{
3891 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3892 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3893 u8 buf;
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003894 int ret;
3895
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003896 if (intel_dp->sink_crc.started) {
Rodrigo Vivie5a1cab2015-07-23 16:35:48 -07003897 ret = intel_dp_sink_crc_stop(intel_dp);
3898 if (ret)
3899 return ret;
3900 }
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003901
3902 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3903 return -EIO;
3904
3905 if (!(buf & DP_TEST_CRC_SUPPORTED))
3906 return -ENOTTY;
3907
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003908 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3909
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003910 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3911 return -EIO;
3912
3913 hsw_disable_ips(intel_crtc);
3914
3915 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3916 buf | DP_TEST_SINK_START) < 0) {
3917 hsw_enable_ips(intel_crtc);
3918 return -EIO;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003919 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003920
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003921 intel_dp->sink_crc.started = true;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003922 return 0;
3923}
3924
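/*
 * Read a frame CRC computed by the sink (DP_TEST_SINK / DP_TEST_CRC_R_CR).
 * Enables the sink CRC test mode (IPS is disabled while the test runs),
 * waits up to six vblanks for a CRC that differs from the previously read
 * one, copies the six CRC bytes into @crc and stops the test mode again.
 * Returns 0 on success or a negative error code.  Illustrative caller
 * (sketch only, e.g. a debugfs/test interface):
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC: %02x %02x %02x %02x %02x %02x\n",
 *			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
 */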
3925int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3926{
3927 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3928 struct drm_device *dev = dig_port->base.base.dev;
3929 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3930 u8 buf;
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003931 int count, ret;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003932 int attempts = 6;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003933 bool old_equal_new;
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003934
3935 ret = intel_dp_sink_crc_start(intel_dp);
3936 if (ret)
3937 return ret;
3938
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003939 do {
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003940 intel_wait_for_vblank(dev, intel_crtc->pipe);
3941
Rodrigo Vivi1dda5f92014-10-01 07:32:37 -07003942 if (drm_dp_dpcd_readb(&intel_dp->aux,
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003943 DP_TEST_SINK_MISC, &buf) < 0) {
3944 ret = -EIO;
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003945 goto stop;
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003946 }
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003947 count = buf & DP_TEST_COUNT_MASK;
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003948
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003949 /*
3950 * Count might be reset during the loop. In this case
3951 * last known count needs to be reset as well.
3952 */
3953 if (count == 0)
3954 intel_dp->sink_crc.last_count = 0;
3955
3956 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3957 ret = -EIO;
3958 goto stop;
3959 }
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003960
3961 old_equal_new = (count == intel_dp->sink_crc.last_count &&
3962 !memcmp(intel_dp->sink_crc.last_crc, crc,
3963 6 * sizeof(u8)));
3964
3965 } while (--attempts && (count == 0 || old_equal_new));
Rodrigo Vivi621d4c72015-07-23 16:35:49 -07003966
3967 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
3968 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003969
3970 if (attempts == 0) {
Rodrigo Viviaabc95d2015-07-23 16:35:50 -07003971 if (old_equal_new) {
3972 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
3973 } else {
3974 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3975 ret = -ETIMEDOUT;
3976 goto stop;
3977 }
Rodrigo Viviad9dc912014-09-16 19:18:12 -04003978 }
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003979
Rodrigo Viviafe0d672015-07-23 16:35:45 -07003980stop:
Rodrigo Vivi082dcc72015-07-30 16:26:39 -07003981 intel_dp_sink_crc_stop(intel_dp);
Paulo Zanoni4373f0f2015-05-25 18:52:29 -03003982 return ret;
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02003983}
3984
Jesse Barnesa60f0e32011-10-20 15:09:17 -07003985static bool
3986intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3987{
Jani Nikula9d1a1032014-03-14 16:51:15 +02003988 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3989 DP_DEVICE_SERVICE_IRQ_VECTOR,
3990 sink_irq_vector, 1) == 1;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07003991}
3992
Dave Airlie0e32b392014-05-02 14:02:48 +10003993static bool
3994intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3995{
3996 int ret;
3997
3998 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3999 DP_SINK_COUNT_ESI,
4000 sink_irq_vector, 14);
4001 if (ret != 14)
4002 return false;
4003
4004 return true;
4005}
4006
Todd Previtec5d5ab72015-04-15 08:38:38 -07004007static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004008{
Todd Previtec5d5ab72015-04-15 08:38:38 -07004009 uint8_t test_result = DP_TEST_ACK;
4010 return test_result;
4011}
4012
4013static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4014{
4015 uint8_t test_result = DP_TEST_NAK;
4016 return test_result;
4017}
4018
4019static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4020{
4021 uint8_t test_result = DP_TEST_NAK;
Todd Previte559be302015-05-04 07:48:20 -07004022 struct intel_connector *intel_connector = intel_dp->attached_connector;
4023 struct drm_connector *connector = &intel_connector->base;
4024
4025 if (intel_connector->detect_edid == NULL ||
Daniel Vetterac6f2e22015-05-08 16:15:41 +02004026 connector->edid_corrupt ||
Todd Previte559be302015-05-04 07:48:20 -07004027 intel_dp->aux.i2c_defer_count > 6) {
4028 /* Check EDID read for NACKs, DEFERs and corruption
4029 * (DP CTS 1.2 Core r1.1)
4030 * 4.2.2.4 : Failed EDID read, I2C_NAK
4031 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4032 * 4.2.2.6 : EDID corruption detected
4033 * Use failsafe mode for all cases
4034 */
4035 if (intel_dp->aux.i2c_nack_count > 0 ||
4036 intel_dp->aux.i2c_defer_count > 0)
4037 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4038 intel_dp->aux.i2c_nack_count,
4039 intel_dp->aux.i2c_defer_count);
4040 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4041 } else {
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304042 struct edid *block = intel_connector->detect_edid;
4043
4044 /* We have to write the checksum
4045 * of the last block read
4046 */
4047 block += intel_connector->detect_edid->extensions;
4048
Todd Previte559be302015-05-04 07:48:20 -07004049 if (!drm_dp_dpcd_write(&intel_dp->aux,
4050 DP_TEST_EDID_CHECKSUM,
Thulasimani,Sivakumarf79b468e2015-08-07 15:14:30 +05304051 &block->checksum,
Dan Carpenter5a1cc652015-05-12 21:07:37 +03004052 1))
Todd Previte559be302015-05-04 07:48:20 -07004053 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4054
4055 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4056 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4057 }
4058
4059 /* Set test active flag here so userspace doesn't interrupt things */
4060 intel_dp->compliance_test_active = 1;
4061
Todd Previtec5d5ab72015-04-15 08:38:38 -07004062 return test_result;
4063}
4064
4065static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4066{
4067 uint8_t test_result = DP_TEST_NAK;
4068 return test_result;
4069}
4070
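/*
 * DP compliance (automated test) handling: read DP_TEST_REQUEST from the
 * sink, run the matching autotest handler and write the ACK/NAK result back
 * to DP_TEST_RESPONSE.  Only the EDID read test does real work here; the
 * link training test is unconditionally ACKed and the video/PHY pattern
 * tests are NAKed.
 */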
4071static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4072{
4073 uint8_t response = DP_TEST_NAK;
4074 uint8_t rxdata = 0;
4075 int status = 0;
4076
Todd Previte559be302015-05-04 07:48:20 -07004077 intel_dp->compliance_test_active = 0;
Todd Previtec5d5ab72015-04-15 08:38:38 -07004078 intel_dp->compliance_test_type = 0;
Todd Previte559be302015-05-04 07:48:20 -07004079 intel_dp->compliance_test_data = 0;
4080
Todd Previtec5d5ab72015-04-15 08:38:38 -07004081 intel_dp->aux.i2c_nack_count = 0;
4082 intel_dp->aux.i2c_defer_count = 0;
4083
4084 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4085 if (status <= 0) {
4086 DRM_DEBUG_KMS("Could not read test request from sink\n");
4087 goto update_status;
4088 }
4089
4090 switch (rxdata) {
4091 case DP_TEST_LINK_TRAINING:
4092 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4093 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4094 response = intel_dp_autotest_link_training(intel_dp);
4095 break;
4096 case DP_TEST_LINK_VIDEO_PATTERN:
4097 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4098 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4099 response = intel_dp_autotest_video_pattern(intel_dp);
4100 break;
4101 case DP_TEST_LINK_EDID_READ:
4102 DRM_DEBUG_KMS("EDID test requested\n");
4103 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4104 response = intel_dp_autotest_edid(intel_dp);
4105 break;
4106 case DP_TEST_LINK_PHY_TEST_PATTERN:
4107 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4108 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4109 response = intel_dp_autotest_phy_pattern(intel_dp);
4110 break;
4111 default:
4112 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4113 break;
4114 }
4115
4116update_status:
4117 status = drm_dp_dpcd_write(&intel_dp->aux,
4118 DP_TEST_RESPONSE,
4119 &response, 1);
4120 if (status <= 0)
4121 DRM_DEBUG_KMS("Could not write test response to sink\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004122}
4123
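/*
 * Service an MST sink: read the ESI (event status indicator) bytes, retrain
 * the link if channel EQ is no longer ok, hand the event to the MST topology
 * manager and write back the handled ESI bytes as the ack.  If the ESI read
 * fails the sink is assumed gone and we fall back to SST, signalling
 * userspace with a hotplug event.
 */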
Dave Airlie0e32b392014-05-02 14:02:48 +10004124static int
4125intel_dp_check_mst_status(struct intel_dp *intel_dp)
4126{
4127 bool bret;
4128
4129 if (intel_dp->is_mst) {
4130 u8 esi[16] = { 0 };
4131 int ret = 0;
4132 int retry;
4133 bool handled;
4134 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4135go_again:
4136 if (bret == true) {
4137
4138 /* check link status - esi[10] = 0x200c */
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +03004139 if (intel_dp->active_mst_links &&
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004140 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
Dave Airlie0e32b392014-05-02 14:02:48 +10004141 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4142 intel_dp_start_link_train(intel_dp);
Dave Airlie0e32b392014-05-02 14:02:48 +10004143 intel_dp_stop_link_train(intel_dp);
4144 }
4145
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004146 DRM_DEBUG_KMS("got esi %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004147 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4148
4149 if (handled) {
4150 for (retry = 0; retry < 3; retry++) {
4151 int wret;
4152 wret = drm_dp_dpcd_write(&intel_dp->aux,
4153 DP_SINK_COUNT_ESI+1,
4154 &esi[1], 3);
4155 if (wret == 3) {
4156 break;
4157 }
4158 }
4159
4160 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4161 if (bret == true) {
Andy Shevchenko6f34cc32015-01-15 13:45:09 +02004162 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
Dave Airlie0e32b392014-05-02 14:02:48 +10004163 goto go_again;
4164 }
4165 } else
4166 ret = 0;
4167
4168 return ret;
4169 } else {
4170 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4171 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4172 intel_dp->is_mst = false;
4173 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4174 /* send a hotplug event */
4175 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4176 }
4177 }
4178 return -EINVAL;
4179}
4180
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004181/*
4182 * According to DP spec
4183 * 5.1.2:
4184 * 1. Read DPCD
4185 * 2. Configure link according to Receiver Capabilities
4186 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4187 * 4. Check link status on receipt of hot-plug interrupt
4188 */
Damien Lespiaua5146202015-02-10 19:32:22 +00004189static void
Chris Wilsonea5b2132010-08-04 13:50:23 +01004190intel_dp_check_link_status(struct intel_dp *intel_dp)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004191{
Dave Airlie5b215bc2014-08-05 10:40:20 +10004192 struct drm_device *dev = intel_dp_to_dev(intel_dp);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004193 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004194 u8 sink_irq_vector;
Keith Packard93f62da2011-11-01 19:45:03 -07004195 u8 link_status[DP_LINK_STATUS_SIZE];
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004196
Dave Airlie5b215bc2014-08-05 10:40:20 +10004197 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4198
Maarten Lankhorste02f9a02015-08-05 12:37:08 +02004199 if (!intel_encoder->base.crtc)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004200 return;
4201
Imre Deak1a125d82014-08-18 14:42:46 +03004202 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4203 return;
4204
Keith Packard92fd8fd2011-07-25 19:50:10 -07004205 /* Try to read receiver status if the link appears to be up */
Keith Packard93f62da2011-11-01 19:45:03 -07004206 if (!intel_dp_get_link_status(intel_dp, link_status)) {
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004207 return;
4208 }
4209
Keith Packard92fd8fd2011-07-25 19:50:10 -07004210 /* Now read the DPCD to see if it's actually running */
Keith Packard26d61aa2011-07-25 20:01:09 -07004211 if (!intel_dp_get_dpcd(intel_dp)) {
Jesse Barnes59cd09e2011-07-07 11:10:59 -07004212 return;
4213 }
4214
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004215 /* Try to read the source of the interrupt */
4216 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4217 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4218 /* Clear interrupt source */
Jani Nikula9d1a1032014-03-14 16:51:15 +02004219 drm_dp_dpcd_writeb(&intel_dp->aux,
4220 DP_DEVICE_SERVICE_IRQ_VECTOR,
4221 sink_irq_vector);
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004222
4223 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
Todd Previte09b1eb12015-04-20 15:27:34 -07004224 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
Jesse Barnesa60f0e32011-10-20 15:09:17 -07004225 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4226 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4227 }
4228
Ville Syrjälä901c2da2015-08-17 18:05:12 +03004229 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
Keith Packard92fd8fd2011-07-25 19:50:10 -07004230 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
Jani Nikula8e329a02014-06-03 14:56:21 +03004231 intel_encoder->base.name);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004232 intel_dp_start_link_train(intel_dp);
Imre Deak3ab9c632013-05-03 12:57:41 +03004233 intel_dp_stop_link_train(intel_dp);
Jesse Barnes33a34e42010-09-08 12:42:02 -07004234 }
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004235}
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004236
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004237/* XXX this is probably wrong for multiple downstream ports */
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004238static enum drm_connector_status
Keith Packard26d61aa2011-07-25 20:01:09 -07004239intel_dp_detect_dpcd(struct intel_dp *intel_dp)
Adam Jackson71ba90002011-07-12 17:38:04 -04004240{
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004241 uint8_t *dpcd = intel_dp->dpcd;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004242 uint8_t type;
4243
4244 if (!intel_dp_get_dpcd(intel_dp))
4245 return connector_status_disconnected;
4246
4247 /* if there's no downstream port, we're done */
4248 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
Keith Packard26d61aa2011-07-25 20:01:09 -07004249 return connector_status_connected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004250
4251 /* If we're HPD-aware, SINK_COUNT changes dynamically */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004252 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4253 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
Adam Jackson23235172012-09-20 16:42:45 -04004254 uint8_t reg;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004255
4256 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4257 &reg, 1) < 0)
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004258 return connector_status_unknown;
Jani Nikula9d1a1032014-03-14 16:51:15 +02004259
Adam Jackson23235172012-09-20 16:42:45 -04004260 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4261 : connector_status_disconnected;
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004262 }
4263
4264 /* If no HPD, poke DDC gently */
Jani Nikula0b998362014-03-14 16:51:17 +02004265 if (drm_probe_ddc(&intel_dp->aux.ddc))
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004266 return connector_status_connected;
4267
4268 /* Well we tried, say unknown for unreliable port types */
Jani Nikulac9ff1602013-09-27 14:48:42 +03004269 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4270 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4271 if (type == DP_DS_PORT_TYPE_VGA ||
4272 type == DP_DS_PORT_TYPE_NON_EDID)
4273 return connector_status_unknown;
4274 } else {
4275 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4276 DP_DWN_STRM_PORT_TYPE_MASK;
4277 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4278 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4279 return connector_status_unknown;
4280 }
Adam Jacksoncaf9ab22012-09-18 10:58:50 -04004281
4282 /* Anything else is out of spec, warn and ignore */
4283 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
Keith Packard26d61aa2011-07-25 20:01:09 -07004284 return connector_status_disconnected;
Adam Jackson71ba90002011-07-12 17:38:04 -04004285}
4286
4287static enum drm_connector_status
Chris Wilsond410b562014-09-02 20:03:59 +01004288edp_detect(struct intel_dp *intel_dp)
4289{
4290 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4291 enum drm_connector_status status;
4292
4293 status = intel_panel_detect(dev);
4294 if (status == connector_status_unknown)
4295 status = connector_status_connected;
4296
4297 return status;
4298}
4299
Jani Nikulab93433c2015-08-20 10:47:36 +03004300static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4301 struct intel_digital_port *port)
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004302{
Jani Nikulab93433c2015-08-20 10:47:36 +03004303 u32 bit;
Jesse Barnes01cb9ea2010-10-07 16:01:12 -07004304
Jani Nikula0df53b72015-08-20 10:47:40 +03004305 switch (port->port) {
4306 case PORT_A:
4307 return true;
4308 case PORT_B:
4309 bit = SDE_PORTB_HOTPLUG;
4310 break;
4311 case PORT_C:
4312 bit = SDE_PORTC_HOTPLUG;
4313 break;
4314 case PORT_D:
4315 bit = SDE_PORTD_HOTPLUG;
4316 break;
4317 default:
4318 MISSING_CASE(port->port);
4319 return false;
4320 }
4321
4322 return I915_READ(SDEISR) & bit;
4323}
4324
4325static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4326 struct intel_digital_port *port)
4327{
4328 u32 bit;
4329
4330 switch (port->port) {
4331 case PORT_A:
4332 return true;
4333 case PORT_B:
4334 bit = SDE_PORTB_HOTPLUG_CPT;
4335 break;
4336 case PORT_C:
4337 bit = SDE_PORTC_HOTPLUG_CPT;
4338 break;
4339 case PORT_D:
4340 bit = SDE_PORTD_HOTPLUG_CPT;
4341 break;
Jani Nikulaa78695d2015-09-18 15:54:50 +03004342 case PORT_E:
4343 bit = SDE_PORTE_HOTPLUG_SPT;
4344 break;
Jani Nikula0df53b72015-08-20 10:47:40 +03004345 default:
4346 MISSING_CASE(port->port);
4347 return false;
Jani Nikulab93433c2015-08-20 10:47:36 +03004348 }
Damien Lespiau1b469632012-12-13 16:09:01 +00004349
Jani Nikulab93433c2015-08-20 10:47:36 +03004350 return I915_READ(SDEISR) & bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004351}
4352
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004353static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula1d245982015-08-20 10:47:37 +03004354 struct intel_digital_port *port)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004355{
Jani Nikula9642c812015-08-20 10:47:41 +03004356 u32 bit;
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08004357
Jani Nikula9642c812015-08-20 10:47:41 +03004358 switch (port->port) {
4359 case PORT_B:
4360 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4361 break;
4362 case PORT_C:
4363 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4364 break;
4365 case PORT_D:
4366 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4367 break;
4368 default:
4369 MISSING_CASE(port->port);
4370 return false;
4371 }
4372
4373 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4374}
4375
4376static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4377 struct intel_digital_port *port)
4378{
4379 u32 bit;
4380
4381 switch (port->port) {
4382 case PORT_B:
4383 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4384 break;
4385 case PORT_C:
4386 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4387 break;
4388 case PORT_D:
4389 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4390 break;
4391 default:
4392 MISSING_CASE(port->port);
4393 return false;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004394 }
4395
Jani Nikula1d245982015-08-20 10:47:37 +03004396 return I915_READ(PORT_HOTPLUG_STAT) & bit;
Dave Airlie2a592be2014-09-01 16:58:12 +10004397}
4398
Jani Nikulae464bfd2015-08-20 10:47:42 +03004399static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304400 struct intel_digital_port *intel_dig_port)
Jani Nikulae464bfd2015-08-20 10:47:42 +03004401{
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304402 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4403 enum port port;
Jani Nikulae464bfd2015-08-20 10:47:42 +03004404 u32 bit;
4405
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304406 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4407 switch (port) {
Jani Nikulae464bfd2015-08-20 10:47:42 +03004408 case PORT_A:
4409 bit = BXT_DE_PORT_HP_DDIA;
4410 break;
4411 case PORT_B:
4412 bit = BXT_DE_PORT_HP_DDIB;
4413 break;
4414 case PORT_C:
4415 bit = BXT_DE_PORT_HP_DDIC;
4416 break;
4417 default:
Sonika Jindale2ec35a2015-09-11 16:58:32 +05304418 MISSING_CASE(port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004419 return false;
4420 }
4421
4422 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4423}
4424
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004425/*
4426 * intel_digital_port_connected - is the specified port connected?
4427 * @dev_priv: i915 private structure
4428 * @port: the port to test
4429 *
4430 * Return %true if @port is connected, %false otherwise.
4431 */
Sonika Jindal237ed862015-09-15 09:44:20 +05304432bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004433 struct intel_digital_port *port)
4434{
Jani Nikula0df53b72015-08-20 10:47:40 +03004435 if (HAS_PCH_IBX(dev_priv))
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004436 return ibx_digital_port_connected(dev_priv, port);
Jani Nikula0df53b72015-08-20 10:47:40 +03004437 if (HAS_PCH_SPLIT(dev_priv))
4438 return cpt_digital_port_connected(dev_priv, port);
Jani Nikulae464bfd2015-08-20 10:47:42 +03004439 else if (IS_BROXTON(dev_priv))
4440 return bxt_digital_port_connected(dev_priv, port);
Jani Nikula9642c812015-08-20 10:47:41 +03004441 else if (IS_VALLEYVIEW(dev_priv))
4442 return vlv_digital_port_connected(dev_priv, port);
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004443 else
4444 return g4x_digital_port_connected(dev_priv, port);
4445}
4446
Dave Airlie2a592be2014-09-01 16:58:12 +10004447static enum drm_connector_status
Jani Nikulab93433c2015-08-20 10:47:36 +03004448ironlake_dp_detect(struct intel_dp *intel_dp)
4449{
4450 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4451 struct drm_i915_private *dev_priv = dev->dev_private;
4452 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4453
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004454 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
Jani Nikulab93433c2015-08-20 10:47:36 +03004455 return connector_status_disconnected;
4456
4457 return intel_dp_detect_dpcd(intel_dp);
4458}
4459
4460static enum drm_connector_status
Dave Airlie2a592be2014-09-01 16:58:12 +10004461g4x_dp_detect(struct intel_dp *intel_dp)
4462{
4463 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4464 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Dave Airlie2a592be2014-09-01 16:58:12 +10004465
4466 /* Can't disconnect eDP, but you can close the lid... */
4467 if (is_edp(intel_dp)) {
4468 enum drm_connector_status status;
4469
4470 status = intel_panel_detect(dev);
4471 if (status == connector_status_unknown)
4472 status = connector_status_connected;
4473 return status;
4474 }
4475
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004476 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004477 return connector_status_disconnected;
4478
Keith Packard26d61aa2011-07-25 20:01:09 -07004479 return intel_dp_detect_dpcd(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004480}
4481
Keith Packard8c241fe2011-09-28 16:38:44 -07004482static struct edid *
Chris Wilsonbeb60602014-09-02 20:04:00 +01004483intel_dp_get_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004484{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004485 struct intel_connector *intel_connector = intel_dp->attached_connector;
Keith Packard8c241fe2011-09-28 16:38:44 -07004486
Jani Nikula9cd300e2012-10-19 14:51:52 +03004487 /* use cached edid if we have one */
4488 if (intel_connector->edid) {
Jani Nikula9cd300e2012-10-19 14:51:52 +03004489 /* invalid edid */
4490 if (IS_ERR(intel_connector->edid))
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004491 return NULL;
4492
Jani Nikula55e9ede2013-10-01 10:38:54 +03004493 return drm_edid_duplicate(intel_connector->edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004494 } else
4495 return drm_get_edid(&intel_connector->base,
4496 &intel_dp->aux.ddc);
Keith Packard8c241fe2011-09-28 16:38:44 -07004497}
4498
Chris Wilsonbeb60602014-09-02 20:04:00 +01004499static void
4500intel_dp_set_edid(struct intel_dp *intel_dp)
Keith Packard8c241fe2011-09-28 16:38:44 -07004501{
Chris Wilsonbeb60602014-09-02 20:04:00 +01004502 struct intel_connector *intel_connector = intel_dp->attached_connector;
4503 struct edid *edid;
Keith Packard8c241fe2011-09-28 16:38:44 -07004504
Chris Wilsonbeb60602014-09-02 20:04:00 +01004505 edid = intel_dp_get_edid(intel_dp);
4506 intel_connector->detect_edid = edid;
Jani Nikula9cd300e2012-10-19 14:51:52 +03004507
Chris Wilsonbeb60602014-09-02 20:04:00 +01004508 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4509 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4510 else
4511 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4512}
Jesse Barnesd6f24d02012-06-14 15:28:33 -04004513
Chris Wilsonbeb60602014-09-02 20:04:00 +01004514static void
4515intel_dp_unset_edid(struct intel_dp *intel_dp)
4516{
4517 struct intel_connector *intel_connector = intel_dp->attached_connector;
4518
4519 kfree(intel_connector->detect_edid);
4520 intel_connector->detect_edid = NULL;
4521
4522 intel_dp->has_audio = false;
4523}
4524
4525static enum intel_display_power_domain
4526intel_dp_power_get(struct intel_dp *dp)
4527{
4528 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4529 enum intel_display_power_domain power_domain;
4530
4531 power_domain = intel_display_port_power_domain(encoder);
4532 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4533
4534 return power_domain;
4535}
4536
4537static void
4538intel_dp_power_put(struct intel_dp *dp,
4539 enum intel_display_power_domain power_domain)
4540{
4541 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4542 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
Keith Packard8c241fe2011-09-28 16:38:44 -07004543}
4544
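/*
 * Full connector detection: take the required power domain reference, run
 * the platform specific hot plug / panel detection and, for a connected
 * sink, read the OUI, probe for MST, cache the EDID and service any pending
 * automated test request before dropping the power reference again.  An
 * MST-managed port reports disconnected here, since its outputs are exposed
 * through the MST connectors instead.
 */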
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004545static enum drm_connector_status
4546intel_dp_detect(struct drm_connector *connector, bool force)
4547{
4548 struct intel_dp *intel_dp = intel_attached_dp(connector);
Paulo Zanonid63885d2012-10-26 19:05:49 -02004549 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4550 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004551 struct drm_device *dev = connector->dev;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004552 enum drm_connector_status status;
Imre Deak671dedd2014-03-05 16:20:53 +02004553 enum intel_display_power_domain power_domain;
Dave Airlie0e32b392014-05-02 14:02:48 +10004554 bool ret;
Todd Previte09b1eb12015-04-20 15:27:34 -07004555 u8 sink_irq_vector;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004556
Chris Wilson164c8592013-07-20 20:27:08 +01004557 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03004558 connector->base.id, connector->name);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004559 intel_dp_unset_edid(intel_dp);
Chris Wilson164c8592013-07-20 20:27:08 +01004560
Dave Airlie0e32b392014-05-02 14:02:48 +10004561 if (intel_dp->is_mst) {
4562 /* MST devices are disconnected from a monitor POV */
4563 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4564 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004565 return connector_status_disconnected;
Dave Airlie0e32b392014-05-02 14:02:48 +10004566 }
4567
Chris Wilsonbeb60602014-09-02 20:04:00 +01004568 power_domain = intel_dp_power_get(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004569
Chris Wilsond410b562014-09-02 20:03:59 +01004570 /* Can't disconnect eDP, but you can close the lid... */
4571 if (is_edp(intel_dp))
4572 status = edp_detect(intel_dp);
4573 else if (HAS_PCH_SPLIT(dev))
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004574 status = ironlake_dp_detect(intel_dp);
4575 else
4576 status = g4x_dp_detect(intel_dp);
4577 if (status != connector_status_connected)
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004578 goto out;
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004579
Adam Jackson0d198322012-05-14 16:05:47 -04004580 intel_dp_probe_oui(intel_dp);
4581
Dave Airlie0e32b392014-05-02 14:02:48 +10004582 ret = intel_dp_probe_mst(intel_dp);
4583 if (ret) {
4584 /* if we are in MST mode then this connector
4585 won't appear connected or have anything with EDID on it */
4586 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4587 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4588 status = connector_status_disconnected;
4589 goto out;
4590 }
4591
Chris Wilsonbeb60602014-09-02 20:04:00 +01004592 intel_dp_set_edid(intel_dp);
Zhenyu Wanga9756bb2010-09-19 13:09:06 +08004593
Paulo Zanonid63885d2012-10-26 19:05:49 -02004594 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4595 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004596 status = connector_status_connected;
4597
Todd Previte09b1eb12015-04-20 15:27:34 -07004598 /* Try to read the source of the interrupt */
4599 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4600 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4601 /* Clear interrupt source */
4602 drm_dp_dpcd_writeb(&intel_dp->aux,
4603 DP_DEVICE_SERVICE_IRQ_VECTOR,
4604 sink_irq_vector);
4605
4606 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4607 intel_dp_handle_test_request(intel_dp);
4608 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4609 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4610 }
4611
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004612out:
Chris Wilsonbeb60602014-09-02 20:04:00 +01004613 intel_dp_power_put(intel_dp, power_domain);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004614 return status;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004615}
4616
Chris Wilsonbeb60602014-09-02 20:04:00 +01004617static void
4618intel_dp_force(struct drm_connector *connector)
4619{
4620 struct intel_dp *intel_dp = intel_attached_dp(connector);
4621 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4622 enum intel_display_power_domain power_domain;
4623
4624 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4625 connector->base.id, connector->name);
4626 intel_dp_unset_edid(intel_dp);
4627
4628 if (connector->status != connector_status_connected)
4629 return;
4630
4631 power_domain = intel_dp_power_get(intel_dp);
4632
4633 intel_dp_set_edid(intel_dp);
4634
4635 intel_dp_power_put(intel_dp, power_domain);
4636
4637 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4638 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4639}
4640
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004641static int intel_dp_get_modes(struct drm_connector *connector)
4642{
Jani Nikuladd06f902012-10-19 14:51:50 +03004643 struct intel_connector *intel_connector = to_intel_connector(connector);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004644 struct edid *edid;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004645
Chris Wilsonbeb60602014-09-02 20:04:00 +01004646 edid = intel_connector->detect_edid;
4647 if (edid) {
4648 int ret = intel_connector_update_modes(connector, edid);
4649 if (ret)
4650 return ret;
4651 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004652
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004653 /* if eDP has no EDID, fall back to fixed mode */
Chris Wilsonbeb60602014-09-02 20:04:00 +01004654 if (is_edp(intel_attached_dp(connector)) &&
4655 intel_connector->panel.fixed_mode) {
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004656 struct drm_display_mode *mode;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004657
4658 mode = drm_mode_duplicate(connector->dev,
Jani Nikuladd06f902012-10-19 14:51:50 +03004659 intel_connector->panel.fixed_mode);
Jani Nikulaf8779fd2012-10-19 14:51:48 +03004660 if (mode) {
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004661 drm_mode_probed_add(connector, mode);
4662 return 1;
4663 }
4664 }
Chris Wilsonbeb60602014-09-02 20:04:00 +01004665
Zhenyu Wang32f9d652009-07-24 01:00:32 +08004666 return 0;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004667}
4668
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004669static bool
4670intel_dp_detect_audio(struct drm_connector *connector)
4671{
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004672 bool has_audio = false;
Chris Wilsonbeb60602014-09-02 20:04:00 +01004673 struct edid *edid;
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004674
Chris Wilsonbeb60602014-09-02 20:04:00 +01004675 edid = to_intel_connector(connector)->detect_edid;
4676 if (edid)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004677 has_audio = drm_detect_monitor_audio(edid);
Imre Deak671dedd2014-03-05 16:20:53 +02004678
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004679 return has_audio;
4680}
4681
Chris Wilsonf6849602010-09-19 09:29:33 +01004682static int
4683intel_dp_set_property(struct drm_connector *connector,
4684 struct drm_property *property,
4685 uint64_t val)
4686{
Chris Wilsone953fd72011-02-21 22:23:52 +00004687 struct drm_i915_private *dev_priv = connector->dev->dev_private;
Yuly Novikov53b41832012-10-26 12:04:00 +03004688 struct intel_connector *intel_connector = to_intel_connector(connector);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004689 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4690 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonf6849602010-09-19 09:29:33 +01004691 int ret;
4692
Rob Clark662595d2012-10-11 20:36:04 -05004693 ret = drm_object_property_set_value(&connector->base, property, val);
Chris Wilsonf6849602010-09-19 09:29:33 +01004694 if (ret)
4695 return ret;
4696
Chris Wilson3f43c482011-05-12 22:17:24 +01004697 if (property == dev_priv->force_audio_property) {
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004698 int i = val;
4699 bool has_audio;
4700
4701 if (i == intel_dp->force_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004702 return 0;
4703
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004704 intel_dp->force_audio = i;
Chris Wilsonf6849602010-09-19 09:29:33 +01004705
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004706 if (i == HDMI_AUDIO_AUTO)
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004707 has_audio = intel_dp_detect_audio(connector);
4708 else
Daniel Vetterc3e5f672012-02-23 17:14:47 +01004709 has_audio = (i == HDMI_AUDIO_ON);
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004710
4711 if (has_audio == intel_dp->has_audio)
Chris Wilsonf6849602010-09-19 09:29:33 +01004712 return 0;
4713
Chris Wilson1aad7ac2011-02-09 18:46:58 +00004714 intel_dp->has_audio = has_audio;
Chris Wilsonf6849602010-09-19 09:29:33 +01004715 goto done;
4716 }
4717
Chris Wilsone953fd72011-02-21 22:23:52 +00004718 if (property == dev_priv->broadcast_rgb_property) {
Daniel Vetterae4edb82013-04-22 17:07:23 +02004719 bool old_auto = intel_dp->color_range_auto;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004720 bool old_range = intel_dp->limited_color_range;
Daniel Vetterae4edb82013-04-22 17:07:23 +02004721
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004722 switch (val) {
4723 case INTEL_BROADCAST_RGB_AUTO:
4724 intel_dp->color_range_auto = true;
4725 break;
4726 case INTEL_BROADCAST_RGB_FULL:
4727 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004728 intel_dp->limited_color_range = false;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004729 break;
4730 case INTEL_BROADCAST_RGB_LIMITED:
4731 intel_dp->color_range_auto = false;
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004732 intel_dp->limited_color_range = true;
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02004733 break;
4734 default:
4735 return -EINVAL;
4736 }
Daniel Vetterae4edb82013-04-22 17:07:23 +02004737
4738 if (old_auto == intel_dp->color_range_auto &&
Ville Syrjälä0f2a2a72015-07-06 15:10:00 +03004739 old_range == intel_dp->limited_color_range)
Daniel Vetterae4edb82013-04-22 17:07:23 +02004740 return 0;
4741
Chris Wilsone953fd72011-02-21 22:23:52 +00004742 goto done;
4743 }
4744
Yuly Novikov53b41832012-10-26 12:04:00 +03004745 if (is_edp(intel_dp) &&
4746 property == connector->dev->mode_config.scaling_mode_property) {
4747 if (val == DRM_MODE_SCALE_NONE) {
4748 DRM_DEBUG_KMS("no scaling not supported\n");
4749 return -EINVAL;
4750 }
4751
4752 if (intel_connector->panel.fitting_mode == val) {
4753 /* the eDP scaling property is not changed */
4754 return 0;
4755 }
4756 intel_connector->panel.fitting_mode = val;
4757
4758 goto done;
4759 }
4760
Chris Wilsonf6849602010-09-19 09:29:33 +01004761 return -EINVAL;
4762
4763done:
Chris Wilsonc0c36b942012-12-19 16:08:43 +00004764 if (intel_encoder->base.crtc)
4765 intel_crtc_restore_mode(intel_encoder->base.crtc);
Chris Wilsonf6849602010-09-19 09:29:33 +01004766
4767 return 0;
4768}
4769
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004770static void
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004771intel_dp_connector_destroy(struct drm_connector *connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004772{
Jani Nikula1d508702012-10-19 14:51:49 +03004773 struct intel_connector *intel_connector = to_intel_connector(connector);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004774
Chris Wilson10e972d2014-09-04 21:43:45 +01004775 kfree(intel_connector->detect_edid);
Chris Wilsonbeb60602014-09-02 20:04:00 +01004776
Jani Nikula9cd300e2012-10-19 14:51:52 +03004777 if (!IS_ERR_OR_NULL(intel_connector->edid))
4778 kfree(intel_connector->edid);
4779
Paulo Zanoniacd8db102013-06-12 17:27:23 -03004780 /* Can't call is_edp() since the encoder may have been destroyed
4781 * already. */
4782 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
Jani Nikula1d508702012-10-19 14:51:49 +03004783 intel_panel_fini(&intel_connector->panel);
Matthew Garrettaaa6fd22011-08-12 12:11:33 +02004784
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004785 drm_connector_cleanup(connector);
Zhenyu Wang55f78c42010-03-29 16:13:57 +08004786 kfree(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004787}
4788
Paulo Zanoni00c09d72012-10-26 19:05:52 -02004789void intel_dp_encoder_destroy(struct drm_encoder *encoder)
Daniel Vetter24d05922010-08-20 18:08:28 +02004790{
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004791 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4792 struct intel_dp *intel_dp = &intel_dig_port->dp;
Daniel Vetter24d05922010-08-20 18:08:28 +02004793
Dave Airlie4f71d0c2014-06-04 16:02:28 +10004794 drm_dp_aux_unregister(&intel_dp->aux);
Dave Airlie0e32b392014-05-02 14:02:48 +10004795 intel_dp_mst_encoder_cleanup(intel_dig_port);
Keith Packardbd943152011-09-18 23:09:52 -07004796 if (is_edp(intel_dp)) {
4797 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03004798 /*
4799	 * vdd might still be enabled due to the delayed vdd off.
4800 * Make sure vdd is actually turned off here.
4801 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03004802 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01004803 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004804 pps_unlock(intel_dp);
4805
Clint Taylor01527b32014-07-07 13:01:46 -07004806 if (intel_dp->edp_notifier.notifier_call) {
4807 unregister_reboot_notifier(&intel_dp->edp_notifier);
4808 intel_dp->edp_notifier.notifier_call = NULL;
4809 }
Keith Packardbd943152011-09-18 23:09:52 -07004810 }
Imre Deakc8bd0e42014-12-12 17:57:38 +02004811 drm_encoder_cleanup(encoder);
Paulo Zanonida63a9f2012-10-26 19:05:46 -02004812 kfree(intel_dig_port);
Daniel Vetter24d05922010-08-20 18:08:28 +02004813}
4814
Imre Deak07f9cd02014-08-18 14:42:45 +03004815static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4816{
4817 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4818
4819 if (!is_edp(intel_dp))
4820 return;
4821
Ville Syrjälä951468f2014-09-04 14:55:31 +03004822 /*
4823	 * vdd might still be enabled due to the delayed vdd off.
4824 * Make sure vdd is actually turned off here.
4825 */
Ville Syrjäläafa4e532014-11-25 15:43:48 +02004826 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004827 pps_lock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004828 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03004829 pps_unlock(intel_dp);
Imre Deak07f9cd02014-08-18 14:42:45 +03004830}
4831
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004832static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4833{
4834 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4835 struct drm_device *dev = intel_dig_port->base.base.dev;
4836 struct drm_i915_private *dev_priv = dev->dev_private;
4837 enum intel_display_power_domain power_domain;
4838
4839 lockdep_assert_held(&dev_priv->pps_mutex);
4840
4841 if (!edp_have_panel_vdd(intel_dp))
4842 return;
4843
4844 /*
4845 * The VDD bit needs a power domain reference, so if the bit is
4846 * already enabled when we boot or resume, grab this reference and
4847 * schedule a vdd off, so we don't hold on to the reference
4848 * indefinitely.
4849 */
4850 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4851 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4852 intel_display_power_get(dev_priv, power_domain);
4853
4854 edp_panel_vdd_schedule_off(intel_dp);
4855}
4856
Imre Deak6d93c0c2014-07-31 14:03:36 +03004857static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4858{
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02004859 struct intel_dp *intel_dp;
4860
4861 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4862 return;
4863
4864 intel_dp = enc_to_intel_dp(encoder);
4865
4866 pps_lock(intel_dp);
4867
4868 /*
4869 * Read out the current power sequencer assignment,
4870 * in case the BIOS did something with it.
4871 */
4872 if (IS_VALLEYVIEW(encoder->dev))
4873 vlv_initial_power_sequencer_setup(intel_dp);
4874
4875 intel_edp_panel_vdd_sanitize(intel_dp);
4876
4877 pps_unlock(intel_dp);
Imre Deak6d93c0c2014-07-31 14:03:36 +03004878}
4879
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004880static const struct drm_connector_funcs intel_dp_connector_funcs = {
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02004881 .dpms = drm_atomic_helper_connector_dpms,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004882 .detect = intel_dp_detect,
Chris Wilsonbeb60602014-09-02 20:04:00 +01004883 .force = intel_dp_force,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004884 .fill_modes = drm_helper_probe_single_connector_modes,
Chris Wilsonf6849602010-09-19 09:29:33 +01004885 .set_property = intel_dp_set_property,
Matt Roper2545e4a2015-01-22 16:51:27 -08004886 .atomic_get_property = intel_connector_atomic_get_property,
Paulo Zanoni73845ad2013-06-12 17:27:30 -03004887 .destroy = intel_dp_connector_destroy,
Matt Roperc6f95f22015-01-22 16:50:32 -08004888 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
Ander Conselvan de Oliveira98969722015-03-20 16:18:06 +02004889 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004890};
4891
4892static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4893 .get_modes = intel_dp_get_modes,
4894 .mode_valid = intel_dp_mode_valid,
Chris Wilsondf0e9242010-09-09 16:20:55 +01004895 .best_encoder = intel_best_encoder,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004896};
4897
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004898static const struct drm_encoder_funcs intel_dp_enc_funcs = {
Imre Deak6d93c0c2014-07-31 14:03:36 +03004899 .reset = intel_dp_encoder_reset,
Daniel Vetter24d05922010-08-20 18:08:28 +02004900 .destroy = intel_dp_encoder_destroy,
Keith Packarda4fc5ed2009-04-07 16:16:42 -07004901};
4902
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004903enum irqreturn
Dave Airlie13cf5502014-06-18 11:29:35 +10004904intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4905{
4906 struct intel_dp *intel_dp = &intel_dig_port->dp;
Imre Deak1c767b32014-08-18 14:42:42 +03004907 struct intel_encoder *intel_encoder = &intel_dig_port->base;
Dave Airlie0e32b392014-05-02 14:02:48 +10004908 struct drm_device *dev = intel_dig_port->base.base.dev;
4909 struct drm_i915_private *dev_priv = dev->dev_private;
Imre Deak1c767b32014-08-18 14:42:42 +03004910 enum intel_display_power_domain power_domain;
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004911 enum irqreturn ret = IRQ_NONE;
Imre Deak1c767b32014-08-18 14:42:42 +03004912
Dave Airlie0e32b392014-05-02 14:02:48 +10004913 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4914 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
Dave Airlie13cf5502014-06-18 11:29:35 +10004915
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004916 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4917 /*
4918 * vdd off can generate a long pulse on eDP which
4919 * would require vdd on to handle it, and thus we
4920 * would end up in an endless cycle of
4921 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4922 */
4923 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4924 port_name(intel_dig_port->port));
Ville Syrjäläa8b3d522015-02-10 14:11:46 +02004925 return IRQ_HANDLED;
Ville Syrjälä7a7f84c2014-10-16 20:46:10 +03004926 }
4927
Ville Syrjälä26fbb772014-08-11 18:37:37 +03004928 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4929 port_name(intel_dig_port->port),
Dave Airlie0e32b392014-05-02 14:02:48 +10004930 long_hpd ? "long" : "short");
Dave Airlie13cf5502014-06-18 11:29:35 +10004931
Imre Deak1c767b32014-08-18 14:42:42 +03004932 power_domain = intel_display_port_power_domain(intel_encoder);
4933 intel_display_power_get(dev_priv, power_domain);
4934
Dave Airlie0e32b392014-05-02 14:02:48 +10004935 if (long_hpd) {
Mika Kahola5fa836a2015-04-29 09:17:40 +03004936 /* indicate that we need to restart link training */
4937 intel_dp->train_set_valid = false;
Dave Airlie2a592be2014-09-01 16:58:12 +10004938
Jani Nikula7e66bcf2015-08-20 10:47:39 +03004939 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4940 goto mst_fail;
Dave Airlie0e32b392014-05-02 14:02:48 +10004941
4942 if (!intel_dp_get_dpcd(intel_dp)) {
4943 goto mst_fail;
4944 }
4945
4946 intel_dp_probe_oui(intel_dp);
4947
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004948 if (!intel_dp_probe_mst(intel_dp)) {
4949 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4950 intel_dp_check_link_status(intel_dp);
4951 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004952 goto mst_fail;
Ville Syrjäläd14e7b62015-08-20 19:37:29 +03004953 }
Dave Airlie0e32b392014-05-02 14:02:48 +10004954 } else {
4955 if (intel_dp->is_mst) {
Imre Deak1c767b32014-08-18 14:42:42 +03004956 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
Dave Airlie0e32b392014-05-02 14:02:48 +10004957 goto mst_fail;
4958 }
4959
4960 if (!intel_dp->is_mst) {
Dave Airlie5b215bc2014-08-05 10:40:20 +10004961 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
Dave Airlie0e32b392014-05-02 14:02:48 +10004962 intel_dp_check_link_status(intel_dp);
Dave Airlie5b215bc2014-08-05 10:40:20 +10004963 drm_modeset_unlock(&dev->mode_config.connection_mutex);
Dave Airlie0e32b392014-05-02 14:02:48 +10004964 }
4965 }
Daniel Vetterb2c5c182015-01-23 06:00:31 +01004966
4967 ret = IRQ_HANDLED;
4968
Imre Deak1c767b32014-08-18 14:42:42 +03004969 goto put_power;
Dave Airlie0e32b392014-05-02 14:02:48 +10004970mst_fail:
4971	/* if we were in MST mode, and the device is no longer there, get out of MST mode */
4972 if (intel_dp->is_mst) {
4973 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4974 intel_dp->is_mst = false;
4975 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4976 }
Imre Deak1c767b32014-08-18 14:42:42 +03004977put_power:
4978 intel_display_power_put(dev_priv, power_domain);
4979
4980 return ret;
Dave Airlie13cf5502014-06-18 11:29:35 +10004981}
4982
Zhenyu Wange3421a12010-04-08 09:43:27 +08004983/* Return which DP Port should be selected for Transcoder DP control */
4984int
Akshay Joshi0206e352011-08-16 15:34:10 -04004985intel_trans_dp_port_sel(struct drm_crtc *crtc)
Zhenyu Wange3421a12010-04-08 09:43:27 +08004986{
4987 struct drm_device *dev = crtc->dev;
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004988 struct intel_encoder *intel_encoder;
4989 struct intel_dp *intel_dp;
Zhenyu Wange3421a12010-04-08 09:43:27 +08004990
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004991 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4992 intel_dp = enc_to_intel_dp(&intel_encoder->base);
Chris Wilsonea5b2132010-08-04 13:50:23 +01004993
Paulo Zanonifa90ece2012-10-26 19:05:44 -02004994 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4995 intel_encoder->type == INTEL_OUTPUT_EDP)
Chris Wilsonea5b2132010-08-04 13:50:23 +01004996 return intel_dp->output_reg;
Zhenyu Wange3421a12010-04-08 09:43:27 +08004997 }
Chris Wilsonea5b2132010-08-04 13:50:23 +01004998
Zhenyu Wange3421a12010-04-08 09:43:27 +08004999 return -1;
5000}
5001
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005002/* check the VBT to see whether the eDP is on another port */
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005003bool intel_dp_is_edp(struct drm_device *dev, enum port port)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005004{
5005 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni768f69c2013-09-11 18:02:47 -03005006 union child_device_config *p_child;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005007 int i;
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005008 static const short port_mapping[] = {
Rodrigo Vivi477ec322015-08-06 15:51:39 +08005009 [PORT_B] = DVO_PORT_DPB,
5010 [PORT_C] = DVO_PORT_DPC,
5011 [PORT_D] = DVO_PORT_DPD,
5012 [PORT_E] = DVO_PORT_DPE,
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005013 };
Zhao Yakui36e83a12010-06-12 14:32:21 +08005014
Ville Syrjälä53ce81a2015-09-11 21:04:38 +03005015 /*
5016	 * eDP is not supported on g4x, so bail out early just
5017	 * for a bit of extra safety in case the VBT is bonkers.
5018 */
5019 if (INTEL_INFO(dev)->gen < 5)
5020 return false;
5021
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005022 if (port == PORT_A)
5023 return true;
5024
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005025 if (!dev_priv->vbt.child_dev_num)
Zhao Yakui36e83a12010-06-12 14:32:21 +08005026 return false;
5027
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005028 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5029 p_child = dev_priv->vbt.child_dev + i;
Zhao Yakui36e83a12010-06-12 14:32:21 +08005030
Ville Syrjälä5d8a7752013-11-01 18:22:39 +02005031 if (p_child->common.dvo_port == port_mapping[port] &&
Ville Syrjäläf02586d2013-11-01 20:32:08 +02005032 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5033 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
Zhao Yakui36e83a12010-06-12 14:32:21 +08005034 return true;
5035 }
5036 return false;
5037}
5038
Dave Airlie0e32b392014-05-02 14:02:48 +10005039void
Chris Wilsonf6849602010-09-19 09:29:33 +01005040intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5041{
Yuly Novikov53b41832012-10-26 12:04:00 +03005042 struct intel_connector *intel_connector = to_intel_connector(connector);
5043
Chris Wilson3f43c482011-05-12 22:17:24 +01005044 intel_attach_force_audio_property(connector);
Chris Wilsone953fd72011-02-21 22:23:52 +00005045 intel_attach_broadcast_rgb_property(connector);
Ville Syrjälä55bc60d2013-01-17 16:31:29 +02005046 intel_dp->color_range_auto = true;
Yuly Novikov53b41832012-10-26 12:04:00 +03005047
5048 if (is_edp(intel_dp)) {
5049 drm_mode_create_scaling_mode_property(connector->dev);
Rob Clark6de6d842012-10-11 20:36:04 -05005050 drm_object_attach_property(
5051 &connector->base,
Yuly Novikov53b41832012-10-26 12:04:00 +03005052 connector->dev->mode_config.scaling_mode_property,
Yuly Novikov8e740cd2012-10-26 12:04:01 +03005053 DRM_MODE_SCALE_ASPECT);
5054 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
Yuly Novikov53b41832012-10-26 12:04:00 +03005055 }
Chris Wilsonf6849602010-09-19 09:29:33 +01005056}
5057
Imre Deakdada1a92014-01-29 13:25:41 +02005058static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5059{
5060 intel_dp->last_power_cycle = jiffies;
5061 intel_dp->last_power_on = jiffies;
5062 intel_dp->last_backlight_off = jiffies;
5063}
5064
Daniel Vetter67a54562012-10-20 20:57:45 +02005065static void
5066intel_dp_init_panel_power_sequencer(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005067 struct intel_dp *intel_dp)
Daniel Vetter67a54562012-10-20 20:57:45 +02005068{
5069 struct drm_i915_private *dev_priv = dev->dev_private;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005070 struct edp_power_seq cur, vbt, spec,
5071 *final = &intel_dp->pps_delays;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305072 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5073 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
Jesse Barnes453c5422013-03-28 09:55:41 -07005074
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005075 lockdep_assert_held(&dev_priv->pps_mutex);
5076
Ville Syrjälä81ddbc62014-10-16 21:27:31 +03005077 /* already initialized? */
5078 if (final->t11_t12 != 0)
5079 return;
5080
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305081 if (IS_BROXTON(dev)) {
5082 /*
5083 * TODO: BXT has 2 sets of PPS registers.
5084		 * The correct register for Broxton needs to be identified
5085		 * using the VBT; hardcoding for now.
5086 */
5087 pp_ctrl_reg = BXT_PP_CONTROL(0);
5088 pp_on_reg = BXT_PP_ON_DELAYS(0);
5089 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5090 } else if (HAS_PCH_SPLIT(dev)) {
Jani Nikulabf13e812013-09-06 07:40:05 +03005091 pp_ctrl_reg = PCH_PP_CONTROL;
Jesse Barnes453c5422013-03-28 09:55:41 -07005092 pp_on_reg = PCH_PP_ON_DELAYS;
5093 pp_off_reg = PCH_PP_OFF_DELAYS;
5094 pp_div_reg = PCH_PP_DIVISOR;
5095 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005096 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5097
5098 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5099 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5100 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5101 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005102 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005103
5104 /* Workaround: Need to write PP_CONTROL with the unlock key as
5105 * the very first thing. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305106 pp_ctl = ironlake_get_pp_control(intel_dp);
Daniel Vetter67a54562012-10-20 20:57:45 +02005107
Jesse Barnes453c5422013-03-28 09:55:41 -07005108 pp_on = I915_READ(pp_on_reg);
5109 pp_off = I915_READ(pp_off_reg);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305110 if (!IS_BROXTON(dev)) {
5111 I915_WRITE(pp_ctrl_reg, pp_ctl);
5112 pp_div = I915_READ(pp_div_reg);
5113 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005114
5115 /* Pull timing values out of registers */
5116 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5117 PANEL_POWER_UP_DELAY_SHIFT;
5118
5119 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5120 PANEL_LIGHT_ON_DELAY_SHIFT;
5121
5122 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5123 PANEL_LIGHT_OFF_DELAY_SHIFT;
5124
5125 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5126 PANEL_POWER_DOWN_DELAY_SHIFT;
5127
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305128 if (IS_BROXTON(dev)) {
5129 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5130 BXT_POWER_CYCLE_DELAY_SHIFT;
5131 if (tmp > 0)
5132 cur.t11_t12 = (tmp - 1) * 1000;
5133 else
5134 cur.t11_t12 = 0;
5135 } else {
5136 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
Daniel Vetter67a54562012-10-20 20:57:45 +02005137 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305138 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005139
5140 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5141 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5142
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03005143 vbt = dev_priv->vbt.edp_pps;
Daniel Vetter67a54562012-10-20 20:57:45 +02005144
5145 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5146 * our hw here, which are all in 100usec. */
5147 spec.t1_t3 = 210 * 10;
5148 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5149 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5150 spec.t10 = 500 * 10;
5151	/* This one is special and actually in units of 100ms, but
5152	 * zero-based in the hw (so we need to add 100 ms). But the sw vbt
5153	 * table multiplies it by 1000 to make it in units of 100usec,
5154	 * too. */
5155 spec.t11_t12 = (510 + 100) * 10;
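	/* Worked example for t11_t12: a 510 ms limit plus the 100 ms zero-base
	 * offset is 610 ms, i.e. (510 + 100) * 10 = 6100 in 100 usec units. */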
5156
5157 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5158 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5159
5160 /* Use the max of the register settings and vbt. If both are
5161 * unset, fall back to the spec limits. */
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005162#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
Daniel Vetter67a54562012-10-20 20:57:45 +02005163 spec.field : \
5164 max(cur.field, vbt.field))
5165 assign_final(t1_t3);
5166 assign_final(t8);
5167 assign_final(t9);
5168 assign_final(t10);
5169 assign_final(t11_t12);
5170#undef assign_final
5171
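	/* The delays above are in 100 usec units; dividing by 10 (rounding up)
	 * converts them to the millisecond values stored in intel_dp. */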
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005172#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
Daniel Vetter67a54562012-10-20 20:57:45 +02005173 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5174 intel_dp->backlight_on_delay = get_delay(t8);
5175 intel_dp->backlight_off_delay = get_delay(t9);
5176 intel_dp->panel_power_down_delay = get_delay(t10);
5177 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5178#undef get_delay
5179
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005180 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5181 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5182 intel_dp->panel_power_cycle_delay);
5183
5184 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5185 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005186}
5187
5188static void
5189intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005190 struct intel_dp *intel_dp)
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005191{
5192 struct drm_i915_private *dev_priv = dev->dev_private;
Jesse Barnes453c5422013-03-28 09:55:41 -07005193 u32 pp_on, pp_off, pp_div, port_sel = 0;
5194 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305195 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
Ville Syrjäläad933b52014-08-18 22:15:56 +03005196 enum port port = dp_to_dig_port(intel_dp)->port;
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005197 const struct edp_power_seq *seq = &intel_dp->pps_delays;
Jesse Barnes453c5422013-03-28 09:55:41 -07005198
Ville Syrjäläe39b9992014-09-04 14:53:14 +03005199 lockdep_assert_held(&dev_priv->pps_mutex);
Jesse Barnes453c5422013-03-28 09:55:41 -07005200
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305201 if (IS_BROXTON(dev)) {
5202 /*
5203 * TODO: BXT has 2 sets of PPS registers.
5204		 * The correct register for Broxton needs to be identified
5205		 * using the VBT; hardcoding for now.
5206 */
5207 pp_ctrl_reg = BXT_PP_CONTROL(0);
5208 pp_on_reg = BXT_PP_ON_DELAYS(0);
5209 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5210
5211 } else if (HAS_PCH_SPLIT(dev)) {
Jesse Barnes453c5422013-03-28 09:55:41 -07005212 pp_on_reg = PCH_PP_ON_DELAYS;
5213 pp_off_reg = PCH_PP_OFF_DELAYS;
5214 pp_div_reg = PCH_PP_DIVISOR;
5215 } else {
Jani Nikulabf13e812013-09-06 07:40:05 +03005216 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5217
5218 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5219 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5220 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
Jesse Barnes453c5422013-03-28 09:55:41 -07005221 }
5222
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005223 /*
5224 * And finally store the new values in the power sequencer. The
5225 * backlight delays are set to 1 because we do manual waits on them. For
5226 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5227 * we'll end up waiting for the backlight off delay twice: once when we
5228 * do the manual sleep, and once when we disable the panel and wait for
5229 * the PP_STATUS bit to become zero.
5230 */
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005231 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
Paulo Zanonib2f19d12013-12-19 14:29:44 -02005232 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5233 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
Jani Nikulaf30d26e2013-01-16 10:53:40 +02005234 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
Daniel Vetter67a54562012-10-20 20:57:45 +02005235 /* Compute the divisor for the pp clock, simply match the Bspec
5236 * formula. */
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305237 if (IS_BROXTON(dev)) {
5238 pp_div = I915_READ(pp_ctrl_reg);
5239 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5240 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5241 << BXT_POWER_CYCLE_DELAY_SHIFT);
5242 } else {
5243 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5244 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5245 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5246 }
Daniel Vetter67a54562012-10-20 20:57:45 +02005247
5248 /* Haswell doesn't have any port selection bits for the panel
5249 * power sequencer any more. */
Imre Deakbc7d38a2013-05-16 14:40:36 +03005250 if (IS_VALLEYVIEW(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005251 port_sel = PANEL_PORT_SELECT_VLV(port);
Imre Deakbc7d38a2013-05-16 14:40:36 +03005252 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
Ville Syrjäläad933b52014-08-18 22:15:56 +03005253 if (port == PORT_A)
Jani Nikulaa24c1442013-09-05 16:44:46 +03005254 port_sel = PANEL_PORT_SELECT_DPA;
Daniel Vetter67a54562012-10-20 20:57:45 +02005255 else
Jani Nikulaa24c1442013-09-05 16:44:46 +03005256 port_sel = PANEL_PORT_SELECT_DPD;
Daniel Vetter67a54562012-10-20 20:57:45 +02005257 }
5258
Jesse Barnes453c5422013-03-28 09:55:41 -07005259 pp_on |= port_sel;
5260
5261 I915_WRITE(pp_on_reg, pp_on);
5262 I915_WRITE(pp_off_reg, pp_off);
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305263 if (IS_BROXTON(dev))
5264 I915_WRITE(pp_ctrl_reg, pp_div);
5265 else
5266 I915_WRITE(pp_div_reg, pp_div);
Daniel Vetter67a54562012-10-20 20:57:45 +02005267
Daniel Vetter67a54562012-10-20 20:57:45 +02005268 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
Jesse Barnes453c5422013-03-28 09:55:41 -07005269 I915_READ(pp_on_reg),
5270 I915_READ(pp_off_reg),
Vandana Kannanb0a08be2015-06-18 11:00:55 +05305271 IS_BROXTON(dev) ?
5272 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
Jesse Barnes453c5422013-03-28 09:55:41 -07005273 I915_READ(pp_div_reg));
Keith Packardc8110e52009-05-06 11:51:10 -07005274}
5275
Vandana Kannanb33a2812015-02-13 15:33:03 +05305276/**
5277 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5278 * @dev: DRM device
5279 * @refresh_rate: RR to be programmed
5280 *
5281 * This function gets called when refresh rate (RR) has to be changed from
5282 * one frequency to another. Switches can be between high and low RR
5283 * supported by the panel or to any other RR based on media playback (in
5284 * this case, RR value needs to be passed from user space).
5285 *
5286 * The caller of this function needs to take a lock on dev_priv->drrs.
5287 */
Vandana Kannan96178ee2015-01-10 02:25:56 +05305288static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305289{
5290 struct drm_i915_private *dev_priv = dev->dev_private;
5291 struct intel_encoder *encoder;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305292 struct intel_digital_port *dig_port = NULL;
5293 struct intel_dp *intel_dp = dev_priv->drrs.dp;
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02005294 struct intel_crtc_state *config = NULL;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305295 struct intel_crtc *intel_crtc = NULL;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305296 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305297
5298 if (refresh_rate <= 0) {
5299 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5300 return;
5301 }
5302
Vandana Kannan96178ee2015-01-10 02:25:56 +05305303 if (intel_dp == NULL) {
5304 DRM_DEBUG_KMS("DRRS not supported.\n");
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305305 return;
5306 }
5307
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005308 /*
Rodrigo Vivie4d59f62014-11-20 02:22:08 -08005309 * FIXME: This needs proper synchronization with psr state for some
5310 * platforms that cannot have PSR and DRRS enabled at the same time.
Daniel Vetter1fcc9d12014-07-11 10:30:10 -07005311 */
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305312
Vandana Kannan96178ee2015-01-10 02:25:56 +05305313 dig_port = dp_to_dig_port(intel_dp);
5314 encoder = &dig_port->base;
Ander Conselvan de Oliveira723f9aa2015-03-20 16:18:18 +02005315 intel_crtc = to_intel_crtc(encoder->base.crtc);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305316
5317 if (!intel_crtc) {
5318 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5319 return;
5320 }
5321
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005322 config = intel_crtc->config;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305323
Vandana Kannan96178ee2015-01-10 02:25:56 +05305324 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305325 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5326 return;
5327 }
5328
Vandana Kannan96178ee2015-01-10 02:25:56 +05305329 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5330 refresh_rate)
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305331 index = DRRS_LOW_RR;
5332
Vandana Kannan96178ee2015-01-10 02:25:56 +05305333 if (index == dev_priv->drrs.refresh_rate_type) {
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305334 DRM_DEBUG_KMS(
5335 "DRRS requested for previously set RR...ignoring\n");
5336 return;
5337 }
5338
5339 if (!intel_crtc->active) {
5340 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5341 return;
5342 }
5343
Durgadoss R44395bf2015-02-13 15:33:02 +05305344 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
Vandana Kannana4c30b12015-02-13 15:33:00 +05305345 switch (index) {
5346 case DRRS_HIGH_RR:
5347 intel_dp_set_m_n(intel_crtc, M1_N1);
5348 break;
5349 case DRRS_LOW_RR:
5350 intel_dp_set_m_n(intel_crtc, M2_N2);
5351 break;
5352 case DRRS_MAX_RR:
5353 default:
5354 DRM_ERROR("Unsupported refreshrate type\n");
5355 }
5356 } else if (INTEL_INFO(dev)->gen > 6) {
Ville Syrjälä649636e2015-09-22 19:50:01 +03005357 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5358 u32 val;
Vandana Kannana4c30b12015-02-13 15:33:00 +05305359
Ville Syrjälä649636e2015-09-22 19:50:01 +03005360 val = I915_READ(reg);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305361 if (index > DRRS_HIGH_RR) {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305362 if (IS_VALLEYVIEW(dev))
5363 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5364 else
5365 val |= PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305366 } else {
Vandana Kannan6fa7aec2015-02-13 15:33:01 +05305367 if (IS_VALLEYVIEW(dev))
5368 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5369 else
5370 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305371 }
5372 I915_WRITE(reg, val);
5373 }
5374
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305375 dev_priv->drrs.refresh_rate_type = index;
5376
5377 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5378}
5379
Vandana Kannanb33a2812015-02-13 15:33:03 +05305380/**
5381 * intel_edp_drrs_enable - init drrs struct if supported
5382 * @intel_dp: DP struct
5383 *
5384 * Initializes frontbuffer_bits and drrs.dp
5385 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305386void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5387{
5388 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5389 struct drm_i915_private *dev_priv = dev->dev_private;
5390 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5391 struct drm_crtc *crtc = dig_port->base.base.crtc;
5392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5393
5394 if (!intel_crtc->config->has_drrs) {
5395 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5396 return;
5397 }
5398
5399 mutex_lock(&dev_priv->drrs.mutex);
5400 if (WARN_ON(dev_priv->drrs.dp)) {
5401 DRM_ERROR("DRRS already enabled\n");
5402 goto unlock;
5403 }
5404
5405 dev_priv->drrs.busy_frontbuffer_bits = 0;
5406
5407 dev_priv->drrs.dp = intel_dp;
5408
5409unlock:
5410 mutex_unlock(&dev_priv->drrs.mutex);
5411}
5412
Vandana Kannanb33a2812015-02-13 15:33:03 +05305413/**
5414 * intel_edp_drrs_disable - Disable DRRS
5415 * @intel_dp: DP struct
5416 *
5417 */
Vandana Kannanc3955782015-01-22 15:17:40 +05305418void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5419{
5420 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5421 struct drm_i915_private *dev_priv = dev->dev_private;
5422 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5423 struct drm_crtc *crtc = dig_port->base.base.crtc;
5424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5425
5426 if (!intel_crtc->config->has_drrs)
5427 return;
5428
5429 mutex_lock(&dev_priv->drrs.mutex);
5430 if (!dev_priv->drrs.dp) {
5431 mutex_unlock(&dev_priv->drrs.mutex);
5432 return;
5433 }
5434
5435 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5436 intel_dp_set_drrs_state(dev_priv->dev,
5437 intel_dp->attached_connector->panel.
5438 fixed_mode->vrefresh);
5439
5440 dev_priv->drrs.dp = NULL;
5441 mutex_unlock(&dev_priv->drrs.mutex);
5442
5443 cancel_delayed_work_sync(&dev_priv->drrs.work);
5444}
5445
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305446static void intel_edp_drrs_downclock_work(struct work_struct *work)
5447{
5448 struct drm_i915_private *dev_priv =
5449 container_of(work, typeof(*dev_priv), drrs.work.work);
5450 struct intel_dp *intel_dp;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305451
Vandana Kannan96178ee2015-01-10 02:25:56 +05305452 mutex_lock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305453
Vandana Kannan4e9ac942015-01-22 15:14:45 +05305454 intel_dp = dev_priv->drrs.dp;
5455
5456 if (!intel_dp)
5457 goto unlock;
5458
5459 /*
5460	 * The delayed work can race with an invalidate, hence we need to
5461 * recheck.
5462 */
5463
5464 if (dev_priv->drrs.busy_frontbuffer_bits)
5465 goto unlock;
5466
5467 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5468 intel_dp_set_drrs_state(dev_priv->dev,
5469 intel_dp->attached_connector->panel.
5470 downclock_mode->vrefresh);
5471
5472unlock:
Vandana Kannan96178ee2015-01-10 02:25:56 +05305473 mutex_unlock(&dev_priv->drrs.mutex);
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305474}
5475
Vandana Kannanb33a2812015-02-13 15:33:03 +05305476/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305477 * intel_edp_drrs_invalidate - Disable Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305478 * @dev: DRM device
5479 * @frontbuffer_bits: frontbuffer plane tracking bits
5480 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305481 * This function gets called every time rendering on the given planes starts.
5482 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
Vandana Kannanb33a2812015-02-13 15:33:03 +05305483 *
5484 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5485 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305486void intel_edp_drrs_invalidate(struct drm_device *dev,
5487 unsigned frontbuffer_bits)
5488{
5489 struct drm_i915_private *dev_priv = dev->dev_private;
5490 struct drm_crtc *crtc;
5491 enum pipe pipe;
5492
Daniel Vetter9da7d692015-04-09 16:44:15 +02005493 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305494 return;
5495
Daniel Vetter88f933a2015-04-09 16:44:16 +02005496 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305497
Vandana Kannana93fad02015-01-10 02:25:59 +05305498 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005499 if (!dev_priv->drrs.dp) {
5500 mutex_unlock(&dev_priv->drrs.mutex);
5501 return;
5502 }
5503
Vandana Kannana93fad02015-01-10 02:25:59 +05305504 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5505 pipe = to_intel_crtc(crtc)->pipe;
5506
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005507 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5508 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5509
Ramalingam C0ddfd202015-06-15 20:50:05 +05305510 /* invalidate means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005511 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Vandana Kannana93fad02015-01-10 02:25:59 +05305512 intel_dp_set_drrs_state(dev_priv->dev,
5513 dev_priv->drrs.dp->attached_connector->panel.
5514 fixed_mode->vrefresh);
Vandana Kannana93fad02015-01-10 02:25:59 +05305515
Vandana Kannana93fad02015-01-10 02:25:59 +05305516 mutex_unlock(&dev_priv->drrs.mutex);
5517}
5518
Vandana Kannanb33a2812015-02-13 15:33:03 +05305519/**
Ramalingam C0ddfd202015-06-15 20:50:05 +05305520 * intel_edp_drrs_flush - Restart Idleness DRRS
Vandana Kannanb33a2812015-02-13 15:33:03 +05305521 * @dev: DRM device
5522 * @frontbuffer_bits: frontbuffer plane tracking bits
5523 *
Ramalingam C0ddfd202015-06-15 20:50:05 +05305524 * This function gets called every time rendering on the given planes has
5525 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5526 * (LOW_RR -> HIGH_RR), and idleness detection should be started again
5527 * if no other planes are dirty.
Vandana Kannanb33a2812015-02-13 15:33:03 +05305528 *
5529 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5530 */
Vandana Kannana93fad02015-01-10 02:25:59 +05305531void intel_edp_drrs_flush(struct drm_device *dev,
5532 unsigned frontbuffer_bits)
5533{
5534 struct drm_i915_private *dev_priv = dev->dev_private;
5535 struct drm_crtc *crtc;
5536 enum pipe pipe;
5537
Daniel Vetter9da7d692015-04-09 16:44:15 +02005538 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
Vandana Kannana93fad02015-01-10 02:25:59 +05305539 return;
5540
Daniel Vetter88f933a2015-04-09 16:44:16 +02005541 cancel_delayed_work(&dev_priv->drrs.work);
Ramalingam C3954e732015-03-03 12:11:46 +05305542
Vandana Kannana93fad02015-01-10 02:25:59 +05305543 mutex_lock(&dev_priv->drrs.mutex);
Daniel Vetter9da7d692015-04-09 16:44:15 +02005544 if (!dev_priv->drrs.dp) {
5545 mutex_unlock(&dev_priv->drrs.mutex);
5546 return;
5547 }
5548
Vandana Kannana93fad02015-01-10 02:25:59 +05305549 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5550 pipe = to_intel_crtc(crtc)->pipe;
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005551
5552 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
Vandana Kannana93fad02015-01-10 02:25:59 +05305553 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5554
Ramalingam C0ddfd202015-06-15 20:50:05 +05305555 /* flush means busy screen hence upclock */
Daniel Vetterc1d038c2015-06-18 10:30:25 +02005556 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
Ramalingam C0ddfd202015-06-15 20:50:05 +05305557 intel_dp_set_drrs_state(dev_priv->dev,
5558 dev_priv->drrs.dp->attached_connector->panel.
5559 fixed_mode->vrefresh);
5560
5561 /*
5562 * flush also means no more activity hence schedule downclock, if all
5563 * other fbs are quiescent too
5564 */
5565 if (!dev_priv->drrs.busy_frontbuffer_bits)
Vandana Kannana93fad02015-01-10 02:25:59 +05305566 schedule_delayed_work(&dev_priv->drrs.work,
5567 msecs_to_jiffies(1000));
5568 mutex_unlock(&dev_priv->drrs.mutex);
5569}
5570
Vandana Kannanb33a2812015-02-13 15:33:03 +05305571/**
5572 * DOC: Display Refresh Rate Switching (DRRS)
5573 *
5574 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5575 * which enables switching between low and high refresh rates
5576 * dynamically, based on the usage scenario. This feature is applicable
5577 * to internal panels.
5578 *
5579 * Indication that the panel supports DRRS is given by the panel EDID, which
5580 * would list multiple refresh rates for one resolution.
5581 *
5582 * DRRS is of 2 types - static and seamless.
5583 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5584 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5585 * Seamless DRRS involves changing RR without any visual effect to the user
5586 * and can be used during normal system usage. This is done by programming
5587 * certain registers.
5588 *
5589 * Support for static/seamless DRRS may be indicated in the VBT based on
5590 * inputs from the panel spec.
5591 *
5592 * DRRS saves power by switching to low RR based on usage scenarios.
5593 *
5594 * eDP DRRS:-
5595 * The implementation is based on frontbuffer tracking implementation.
5596 * When there is a disturbance on the screen triggered by user activity or
5597 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5598 * When there is no movement on screen, after a timeout of 1 second, a switch
5599 * to low RR is made.
5600 * For integration with frontbuffer tracking code,
5601 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5602 *
5603 * DRRS can be further extended to support other internal panels and also
5604 * the scenario of video playback wherein RR is set based on the rate
5605 * requested by userspace.
5606 */
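
/*
 * A minimal sketch (illustration only, not compiled) of how the frontbuffer
 * tracking code is expected to drive eDP DRRS through the two hooks above;
 * the function name below is made up for illustration.
 */
#if 0
static void example_drrs_frontbuffer_usage(struct drm_device *dev,
					   unsigned frontbuffer_bits)
{
	/* Screen became busy: force a switch back to HIGH_RR right away. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering or the flip completes ... */

	/* Upclock again and re-arm the 1 second idleness downclock timer. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
#endif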
5607
5608/**
5609 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5610 * @intel_connector: eDP connector
5611 * @fixed_mode: preferred mode of panel
5612 *
5613 * This function is called only once at driver load to initialize basic
5614 * DRRS state (work and mutex).
5615 *
5616 * Returns:
5617 * Downclock mode if panel supports it, else return NULL.
5618 * DRRS support is determined by the presence of downclock mode (apart
5619 * from VBT setting).
5620 */
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305621static struct drm_display_mode *
Vandana Kannan96178ee2015-01-10 02:25:56 +05305622intel_dp_drrs_init(struct intel_connector *intel_connector,
5623 struct drm_display_mode *fixed_mode)
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305624{
5625 struct drm_connector *connector = &intel_connector->base;
Vandana Kannan96178ee2015-01-10 02:25:56 +05305626 struct drm_device *dev = connector->dev;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305627 struct drm_i915_private *dev_priv = dev->dev_private;
5628 struct drm_display_mode *downclock_mode = NULL;
5629
Daniel Vetter9da7d692015-04-09 16:44:15 +02005630 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5631 mutex_init(&dev_priv->drrs.mutex);
5632
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305633 if (INTEL_INFO(dev)->gen <= 6) {
5634 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5635 return NULL;
5636 }
5637
5638 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005639 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305640 return NULL;
5641 }
5642
5643 downclock_mode = intel_find_panel_downclock
5644 (dev, fixed_mode, connector);
5645
5646 if (!downclock_mode) {
Ramalingam Ca1d26342015-02-23 17:38:33 +05305647 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305648 return NULL;
5649 }
5650
Vandana Kannan96178ee2015-01-10 02:25:56 +05305651 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
Pradeep Bhat439d7ac2014-04-05 12:13:28 +05305652
Vandana Kannan96178ee2015-01-10 02:25:56 +05305653 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
Damien Lespiau4079b8d2014-08-05 10:39:42 +01005654 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305655 return downclock_mode;
5656}
5657
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005658static bool intel_edp_init_connector(struct intel_dp *intel_dp,
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005659 struct intel_connector *intel_connector)
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005660{
5661 struct drm_connector *connector = &intel_connector->base;
5662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005663 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5664 struct drm_device *dev = intel_encoder->base.dev;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005665 struct drm_i915_private *dev_priv = dev->dev_private;
5666 struct drm_display_mode *fixed_mode = NULL;
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305667 struct drm_display_mode *downclock_mode = NULL;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005668 bool has_dpcd;
5669 struct drm_display_mode *scan;
5670 struct edid *edid;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005671 enum pipe pipe = INVALID_PIPE;
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005672
5673 if (!is_edp(intel_dp))
5674 return true;
5675
Ville Syrjälä49e6bc52014-10-28 16:15:52 +02005676 pps_lock(intel_dp);
5677 intel_edp_panel_vdd_sanitize(intel_dp);
5678 pps_unlock(intel_dp);
Paulo Zanoni63635212014-04-22 19:55:42 -03005679
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005680 /* Cache DPCD and EDID for edp. */
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005681 has_dpcd = intel_dp_get_dpcd(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005682
5683 if (has_dpcd) {
5684 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5685 dev_priv->no_aux_handshake =
5686 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5687 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5688 } else {
5689 /* if this fails, presume the device is a ghost */
5690 DRM_INFO("failed to retrieve link info, disabling eDP\n");
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005691 return false;
5692 }
5693
5694 /* We now know it's not a ghost, init power sequence regs. */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005695 pps_lock(intel_dp);
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005696 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005697 pps_unlock(intel_dp);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005698
Daniel Vetter060c8772014-03-21 23:22:35 +01005699 mutex_lock(&dev->mode_config.mutex);
Jani Nikula0b998362014-03-14 16:51:17 +02005700 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005701 if (edid) {
5702 if (drm_add_edid_modes(connector, edid)) {
5703 drm_mode_connector_update_edid_property(connector,
5704 edid);
5705 drm_edid_to_eld(connector, edid);
5706 } else {
5707 kfree(edid);
5708 edid = ERR_PTR(-EINVAL);
5709 }
5710 } else {
5711 edid = ERR_PTR(-ENOENT);
5712 }
5713 intel_connector->edid = edid;
5714
5715 /* prefer fixed mode from EDID if available */
5716 list_for_each_entry(scan, &connector->probed_modes, head) {
5717 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5718 fixed_mode = drm_mode_duplicate(dev, scan);
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305719 downclock_mode = intel_dp_drrs_init(
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305720 intel_connector, fixed_mode);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005721 break;
5722 }
5723 }
5724
5725 /* fallback to VBT if available for eDP */
5726 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5727 fixed_mode = drm_mode_duplicate(dev,
5728 dev_priv->vbt.lfp_lvds_vbt_mode);
5729 if (fixed_mode)
5730 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5731 }
Daniel Vetter060c8772014-03-21 23:22:35 +01005732 mutex_unlock(&dev->mode_config.mutex);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005733
Clint Taylor01527b32014-07-07 13:01:46 -07005734 if (IS_VALLEYVIEW(dev)) {
5735 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5736 register_reboot_notifier(&intel_dp->edp_notifier);
Ville Syrjälä6517d272014-11-07 11:16:02 +02005737
5738 /*
5739 * Figure out the current pipe for the initial backlight setup.
5740 * If the current pipe isn't valid, try the PPS pipe, and if that
5741 * fails just assume pipe A.
5742 */
5743 if (IS_CHERRYVIEW(dev))
5744 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5745 else
5746 pipe = PORT_TO_PIPE(intel_dp->DP);
5747
5748 if (pipe != PIPE_A && pipe != PIPE_B)
5749 pipe = intel_dp->pps_pipe;
5750
5751 if (pipe != PIPE_A && pipe != PIPE_B)
5752 pipe = PIPE_A;
5753
5754 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5755 pipe_name(pipe));
Clint Taylor01527b32014-07-07 13:01:46 -07005756 }
5757
Pradeep Bhat4f9db5b2014-04-05 12:12:31 +05305758 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
Jani Nikula5507fae2015-09-14 14:03:48 +03005759 intel_connector->panel.backlight.power = intel_edp_backlight_power;
Ville Syrjälä6517d272014-11-07 11:16:02 +02005760 intel_panel_setup_backlight(connector, pipe);
Paulo Zanonied92f0b2013-06-12 17:27:24 -03005761
5762 return true;
5763}
5764
Paulo Zanoni16c25532013-06-12 17:27:25 -03005765bool
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005766intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5767 struct intel_connector *intel_connector)
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005768{
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005769 struct drm_connector *connector = &intel_connector->base;
5770 struct intel_dp *intel_dp = &intel_dig_port->dp;
5771 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5772 struct drm_device *dev = intel_encoder->base.dev;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005773 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanoni174edf12012-10-26 19:05:50 -02005774 enum port port = intel_dig_port->port;
Jani Nikula0b998362014-03-14 16:51:17 +02005775 int type;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005776
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005777 intel_dp->pps_pipe = INVALID_PIPE;
5778
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005779 /* intel_dp vfuncs */
Damien Lespiaub6b5e382014-01-20 16:00:59 +00005780 if (INTEL_INFO(dev)->gen >= 9)
5781 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5782 else if (IS_VALLEYVIEW(dev))
Damien Lespiauec5b01d2014-01-21 13:35:39 +00005783 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5784 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5785 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5786 else if (HAS_PCH_SPLIT(dev))
5787 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5788 else
5789 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5790
Damien Lespiaub9ca5fa2014-01-20 16:01:00 +00005791 if (INTEL_INFO(dev)->gen >= 9)
5792 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5793 else
5794 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
Damien Lespiau153b1102014-01-21 13:37:15 +00005795
Ander Conselvan de Oliveiraad642172015-10-23 13:01:49 +03005796 if (HAS_DDI(dev))
5797 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5798
Daniel Vetter07679352012-09-06 22:15:42 +02005799 /* Preserve the current hw state. */
5800 intel_dp->DP = I915_READ(intel_dp->output_reg);
Jani Nikuladd06f902012-10-19 14:51:50 +03005801 intel_dp->attached_connector = intel_connector;
Chris Wilson3d3dc142011-02-12 10:33:12 +00005802
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005803 if (intel_dp_is_edp(dev, port))
Gajanan Bhat19c03922012-09-27 19:13:07 +05305804 type = DRM_MODE_CONNECTOR_eDP;
Ville Syrjälä3b32a352013-11-01 18:22:41 +02005805 else
5806 type = DRM_MODE_CONNECTOR_DisplayPort;
Adam Jacksonb3295302010-07-16 14:46:28 -04005807
Imre Deakf7d24902013-05-08 13:14:05 +03005808 /*
5809 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5810 * for DP the encoder type can be set by the caller to
5811 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5812 */
5813 if (type == DRM_MODE_CONNECTOR_eDP)
5814 intel_encoder->type = INTEL_OUTPUT_EDP;
5815
Ville Syrjäläc17ed5b2014-10-16 21:27:27 +03005816 /* eDP only on port B and/or C on vlv/chv */
5817 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5818 port != PORT_B && port != PORT_C))
5819 return false;
5820
Imre Deake7281ea2013-05-08 13:14:08 +03005821 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5822 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5823 port_name(port));
5824
Adam Jacksonb3295302010-07-16 14:46:28 -04005825 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005826 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5827
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005828 connector->interlace_allowed = true;
5829	connector->doublescan_allowed = false;
Ma Lingf8aed702009-08-24 13:50:24 +08005830
Daniel Vetter66a92782012-07-12 20:08:18 +02005831 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
Daniel Vetter4be73782014-01-17 14:39:48 +01005832 edp_panel_vdd_work);
Zhenyu Wang6251ec02010-01-12 05:38:32 +08005833
Chris Wilsondf0e9242010-09-09 16:20:55 +01005834 intel_connector_attach_encoder(intel_connector, intel_encoder);
Thomas Wood34ea3d32014-05-29 16:57:41 +01005835 drm_connector_register(connector);
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005836
Paulo Zanoniaffa9352012-11-23 15:30:39 -02005837 if (HAS_DDI(dev))
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005838 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5839 else
5840 intel_connector->get_hw_state = intel_connector_get_hw_state;
Imre Deak80f65de2014-02-11 17:12:49 +02005841 intel_connector->unregister = intel_dp_connector_unregister;
Paulo Zanonibcbc8892012-10-26 19:05:51 -02005842
Jani Nikula0b998362014-03-14 16:51:17 +02005843 /* Set up the hotplug pin. */
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005844 switch (port) {
5845 case PORT_A:
Egbert Eich1d843f92013-02-25 12:06:49 -05005846 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005847 break;
5848 case PORT_B:
Egbert Eich1d843f92013-02-25 12:06:49 -05005849 intel_encoder->hpd_pin = HPD_PORT_B;
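		/* W/A: on early BXT steppings (A0/A1) port B hotplug is
		 * signalled on the port A HPD pin, so use that pin instead.
		 */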
Jani Nikulae87a0052015-10-20 15:22:02 +03005850 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
Sonika Jindalcf1d5882015-08-10 10:35:36 +05305851 intel_encoder->hpd_pin = HPD_PORT_A;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005852 break;
5853 case PORT_C:
Egbert Eich1d843f92013-02-25 12:06:49 -05005854 intel_encoder->hpd_pin = HPD_PORT_C;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005855 break;
5856 case PORT_D:
Egbert Eich1d843f92013-02-25 12:06:49 -05005857 intel_encoder->hpd_pin = HPD_PORT_D;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005858 break;
Xiong Zhang26951ca2015-08-17 15:55:50 +08005859 case PORT_E:
5860 intel_encoder->hpd_pin = HPD_PORT_E;
5861 break;
Paulo Zanoniab9d7c32012-07-17 17:53:45 -03005862 default:
Damien Lespiauad1c0b12013-03-07 15:30:28 +00005863 BUG();
Zhenyu Wang5eb08b62009-07-24 01:00:31 +08005864 }
5865
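	/*
	 * eDP needs its panel power sequencer state set up before the first
	 * VDD/AUX use; pps_lock serializes access to the PPS registers.
	 */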
Imre Deakdada1a92014-01-29 13:25:41 +02005866 if (is_edp(intel_dp)) {
Ville Syrjälä773538e82014-09-04 14:54:56 +03005867 pps_lock(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005868 intel_dp_init_panel_power_timestamps(intel_dp);
5869 if (IS_VALLEYVIEW(dev))
Ville Syrjäläa4a5d2f2014-09-04 14:54:20 +03005870 vlv_initial_power_sequencer_setup(intel_dp);
Ville Syrjälä1e74a322014-10-28 16:15:51 +02005871 else
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005872 intel_dp_init_panel_power_sequencer(dev, intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005873 pps_unlock(intel_dp);
Imre Deakdada1a92014-01-29 13:25:41 +02005874 }
Paulo Zanoni0095e6d2013-12-19 14:29:39 -02005875
Jani Nikula9d1a1032014-03-14 16:51:15 +02005876 intel_dp_aux_init(intel_dp, intel_connector);
Dave Airliec1f05262012-08-30 11:06:18 +10005877
Dave Airlie0e32b392014-05-02 14:02:48 +10005878 /* init MST on ports that can support it */
Jani Nikula0c9b3712015-05-18 17:10:01 +03005879 if (HAS_DP_MST(dev) &&
5880 (port == PORT_B || port == PORT_C || port == PORT_D))
5881 intel_dp_mst_encoder_init(intel_dig_port,
5882 intel_connector->base.base.id);
Dave Airlie0e32b392014-05-02 14:02:48 +10005883
Ville Syrjälä36b5f422014-10-16 21:27:30 +03005884 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
Dave Airlie4f71d0c2014-06-04 16:02:28 +10005885 drm_dp_aux_unregister(&intel_dp->aux);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005886 if (is_edp(intel_dp)) {
5887 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
Ville Syrjälä951468f2014-09-04 14:55:31 +03005888 /*
5889			 * vdd might still be enabled due to the delayed vdd off.
5890 * Make sure vdd is actually turned off here.
5891 */
Ville Syrjälä773538e82014-09-04 14:54:56 +03005892 pps_lock(intel_dp);
Daniel Vetter4be73782014-01-17 14:39:48 +01005893 edp_panel_vdd_off_sync(intel_dp);
Ville Syrjälä773538e82014-09-04 14:54:56 +03005894 pps_unlock(intel_dp);
Paulo Zanoni15b1d172013-06-12 17:27:27 -03005895 }
Thomas Wood34ea3d32014-05-29 16:57:41 +01005896 drm_connector_unregister(connector);
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005897 drm_connector_cleanup(connector);
Paulo Zanoni16c25532013-06-12 17:27:25 -03005898 return false;
Paulo Zanonib2f246a2013-06-12 17:27:26 -03005899 }
Zhenyu Wang32f9d652009-07-24 01:00:32 +08005900
Chris Wilsonf6849602010-09-19 09:29:33 +01005901 intel_dp_add_properties(intel_dp, connector);
5902
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005903	/* For G4X desktop chips, PEG_BAND_GAP_DATA bits 3:0 must first be written
5904	 * as 0xd. Failing to do so results in spurious interrupts being
5905	 * generated on the port when no cable is attached.
5906 */
5907 if (IS_G4X(dev) && !IS_GM45(dev)) {
5908 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5909 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5910 }
Paulo Zanoni16c25532013-06-12 17:27:25 -03005911
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005912 i915_debugfs_connector_add(connector);
5913
Paulo Zanoni16c25532013-06-12 17:27:25 -03005914 return true;
Keith Packarda4fc5ed2009-04-07 16:16:42 -07005915}
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005916
5917void
5918intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5919{
Dave Airlie13cf5502014-06-18 11:29:35 +10005920 struct drm_i915_private *dev_priv = dev->dev_private;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005921 struct intel_digital_port *intel_dig_port;
5922 struct intel_encoder *intel_encoder;
5923 struct drm_encoder *encoder;
5924 struct intel_connector *intel_connector;
5925
Daniel Vetterb14c5672013-09-19 12:18:32 +02005926 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005927 if (!intel_dig_port)
5928 return;
5929
Ander Conselvan de Oliveira08d9bc92015-04-10 10:59:10 +03005930 intel_connector = intel_connector_alloc();
Sudip Mukherjee11aee0f2015-10-08 19:27:59 +05305931 if (!intel_connector)
5932 goto err_connector_alloc;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005933
5934 intel_encoder = &intel_dig_port->base;
5935 encoder = &intel_encoder->base;
5936
5937 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5938 DRM_MODE_ENCODER_TMDS);
5939
Daniel Vetter5bfe2ac2013-03-27 00:44:55 +01005940 intel_encoder->compute_config = intel_dp_compute_config;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005941 intel_encoder->disable = intel_disable_dp;
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005942 intel_encoder->get_hw_state = intel_dp_get_hw_state;
Jesse Barnes045ac3b2013-05-14 17:08:26 -07005943 intel_encoder->get_config = intel_dp_get_config;
Imre Deak07f9cd02014-08-18 14:42:45 +03005944 intel_encoder->suspend = intel_dp_encoder_suspend;
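	/* Platform-specific encoder enable/disable hooks. */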
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005945 if (IS_CHERRYVIEW(dev)) {
Ville Syrjälä9197c882014-04-09 13:29:05 +03005946 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005947 intel_encoder->pre_enable = chv_pre_enable_dp;
5948 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä580d3812014-04-09 13:29:00 +03005949 intel_encoder->post_disable = chv_post_disable_dp;
Ville Syrjäläd6db9952015-07-08 23:45:49 +03005950 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
Chon Ming Leee4a1d842014-04-09 13:28:20 +03005951 } else if (IS_VALLEYVIEW(dev)) {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005952 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005953 intel_encoder->pre_enable = vlv_pre_enable_dp;
5954 intel_encoder->enable = vlv_enable_dp;
Ville Syrjälä49277c32014-03-31 18:21:26 +03005955 intel_encoder->post_disable = vlv_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005956 } else {
Jani Nikulaecff4f32013-09-06 07:38:29 +03005957 intel_encoder->pre_enable = g4x_pre_enable_dp;
5958 intel_encoder->enable = g4x_enable_dp;
Ville Syrjälä08aff3f2014-08-18 22:16:09 +03005959 if (INTEL_INFO(dev)->gen >= 5)
5960 intel_encoder->post_disable = ilk_post_disable_dp;
Jani Nikulaab1f90f2013-07-30 12:20:30 +03005961 }
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005962
Paulo Zanoni174edf12012-10-26 19:05:50 -02005963 intel_dig_port->port = port;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005964 intel_dig_port->dp.output_reg = output_reg;
5965
Paulo Zanoni00c09d72012-10-26 19:05:52 -02005966 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
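	/* On CHV, port D is driven by pipe C only and ports B/C by pipes A/B;
	 * on other platforms any pipe can drive the port.
	 */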
Ville Syrjälä882ec382014-04-28 14:07:43 +03005967 if (IS_CHERRYVIEW(dev)) {
5968 if (port == PORT_D)
5969 intel_encoder->crtc_mask = 1 << 2;
5970 else
5971 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5972 } else {
5973 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5974 }
Ville Syrjäläbc079e82014-03-03 16:15:28 +02005975 intel_encoder->cloneable = 0;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005976
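	/* Dispatch long/short HPD pulses on this port to the DP hotplug handler. */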
Dave Airlie13cf5502014-06-18 11:29:35 +10005977 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
Jani Nikula5fcece82015-05-27 15:03:42 +03005978 dev_priv->hotplug.irq_port[port] = intel_dig_port;
Dave Airlie13cf5502014-06-18 11:29:35 +10005979
Sudip Mukherjee11aee0f2015-10-08 19:27:59 +05305980 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5981 goto err_init_connector;
5982
5983 return;
5984
5985err_init_connector:
5986 drm_encoder_cleanup(encoder);
5987 kfree(intel_connector);
5988err_connector_alloc:
5989 kfree(intel_dig_port);
5990
5991 return;
Paulo Zanonif0fec3f2012-10-26 19:05:48 -02005992}
Dave Airlie0e32b392014-05-02 14:02:48 +10005993
5994void intel_dp_mst_suspend(struct drm_device *dev)
5995{
5996 struct drm_i915_private *dev_priv = dev->dev_private;
5997 int i;
5998
5999 /* disable MST */
6000 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006001 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006002 if (!intel_dig_port)
6003 continue;
6004
6005 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6006 if (!intel_dig_port->dp.can_mst)
6007 continue;
6008 if (intel_dig_port->dp.is_mst)
6009 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6010 }
6011 }
6012}
6013
6014void intel_dp_mst_resume(struct drm_device *dev)
6015{
6016 struct drm_i915_private *dev_priv = dev->dev_private;
6017 int i;
6018
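	/* Restore MST state on every MST-capable port; if the topology cannot
	 * be resumed, re-check the port's status instead.
	 */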
6019 for (i = 0; i < I915_MAX_PORTS; i++) {
Jani Nikula5fcece82015-05-27 15:03:42 +03006020 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
Dave Airlie0e32b392014-05-02 14:02:48 +10006021 if (!intel_dig_port)
6022 continue;
6023 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6024 int ret;
6025
6026 if (!intel_dig_port->dp.can_mst)
6027 continue;
6028
6029 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6030 if (ret != 0) {
6031 intel_dp_check_mst_status(&intel_dig_port->dp);
6032 }
6033 }
6034 }
6035}