/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 */
8
#include <drm/drmP.h>
#include <drm/drm_hdcp.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include "intel_drv.h"
#include "i915_reg.h"

#define KEY_LOAD_TRIES 5
18
/*
 * Wait for the receiver's KSV FIFO to become ready for reading.
 *
 * Repeatedly invokes shim->read_ksv_ready() via __wait_for() until the shim
 * reports an error, the FIFO becomes ready, or 5 seconds elapse (the spec's
 * maximum allowed time for the KSV list, per the comment below).
 *
 * Returns 0 when the FIFO is ready, the shim's error code if a read failed,
 * -ETIMEDOUT if the FIFO never became ready, or __wait_for()'s own error.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* A shim read failure takes precedence over the ready flag */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
39
/*
 * Clear the HDCP keys from the hardware.
 *
 * Triggers the key clear via HDCP_KEY_CONF, then writes the full set of
 * load/fuse status bits back to HDCP_KEY_STATUS (presumably write-to-clear
 * semantics — TODO confirm against bspec) so a subsequent key load starts
 * from a clean status.
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
46
/*
 * Load the HDCP keys from fuses into the hardware.
 *
 * Asks the PCU (via pcode, under pcu_lock) to start the fuse load, waits for
 * the hardware to report HDCP_KEY_LOAD_DONE, then triggers transmission of
 * the Aksv to the PCH display engine for use during authentication.
 *
 * Returns 0 on success, the pcode/wait error, or -ENXIO if the load
 * completed but HDCP_KEY_LOAD_STATUS indicates failure.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Initiate loading the HDCP key from fuses */
	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
	mutex_unlock(&dev_priv->pcu_lock);
	if (ret) {
		DRM_ERROR("Failed to initiate HDCP key load (%d)\n", ret);
		return ret;
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
75
76/* Returns updated SHA-1 index */
77static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
78{
79 I915_WRITE(HDCP_SHA_TEXT, sha_text);
80 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
81 HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
82 DRM_ERROR("Timed out waiting for SHA1 ready\n");
83 return -ETIMEDOUT;
84 }
85 return 0;
86}
87
88static
89u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
90{
91 enum port port = intel_dig_port->base.port;
92 switch (port) {
93 case PORT_A:
94 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
95 case PORT_B:
96 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
97 case PORT_C:
98 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
99 case PORT_D:
100 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
101 case PORT_E:
102 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
103 default:
104 break;
105 }
106 DRM_ERROR("Unknown port %d\n", port);
107 return -EINVAL;
108}
109
110static
111bool intel_hdcp_is_ksv_valid(u8 *ksv)
112{
113 int i, ones = 0;
114 /* KSV has 20 1's and 20 0's */
115 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
116 ones += hweight8(ksv[i]);
117 if (ones != 20)
118 return false;
119 return true;
120}
121
122/* Implements Part 2 of the HDCP authorization procedure */
123static
124int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
125 const struct intel_hdcp_shim *shim)
126{
127 struct drm_i915_private *dev_priv;
128 u32 vprime, sha_text, sha_leftovers, rep_ctl;
129 u8 bstatus[2], num_downstream, *ksv_fifo;
130 int ret, i, j, sha_idx;
131
132 dev_priv = intel_dig_port->base.base.dev->dev_private;
133
134 ret = shim->read_bstatus(intel_dig_port, bstatus);
135 if (ret)
136 return ret;
137
138 /* If there are no downstream devices, we're all done. */
139 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
140 if (num_downstream == 0) {
141 DRM_INFO("HDCP is enabled (no downstream devices)\n");
142 return 0;
143 }
144
145 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
146 if (ret) {
147 DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
148 return ret;
149 }
150
151 ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
152 if (!ksv_fifo)
153 return -ENOMEM;
154
155 ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
156 if (ret)
157 return ret;
158
159 /* Process V' values from the receiver */
160 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
161 ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
162 if (ret)
163 return ret;
164 I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
165 }
166
167 /*
168 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
169 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
170 * stream is written via the HDCP_SHA_TEXT register in 32-bit
171 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
172 * index will keep track of our progress through the 64 bytes as well as
173 * helping us work the 40-bit KSVs through our 32-bit register.
174 *
175 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
176 */
177 sha_idx = 0;
178 sha_text = 0;
179 sha_leftovers = 0;
180 rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
181 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
182 for (i = 0; i < num_downstream; i++) {
183 unsigned int sha_empty;
184 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
185
186 /* Fill up the empty slots in sha_text and write it out */
187 sha_empty = sizeof(sha_text) - sha_leftovers;
188 for (j = 0; j < sha_empty; j++)
189 sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
190
191 ret = intel_write_sha_text(dev_priv, sha_text);
192 if (ret < 0)
193 return ret;
194
195 /* Programming guide writes this every 64 bytes */
196 sha_idx += sizeof(sha_text);
197 if (!(sha_idx % 64))
198 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
199
200 /* Store the leftover bytes from the ksv in sha_text */
201 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
202 sha_text = 0;
203 for (j = 0; j < sha_leftovers; j++)
204 sha_text |= ksv[sha_empty + j] <<
205 ((sizeof(sha_text) - j - 1) * 8);
206
207 /*
208 * If we still have room in sha_text for more data, continue.
209 * Otherwise, write it out immediately.
210 */
211 if (sizeof(sha_text) > sha_leftovers)
212 continue;
213
214 ret = intel_write_sha_text(dev_priv, sha_text);
215 if (ret < 0)
216 return ret;
217 sha_leftovers = 0;
218 sha_text = 0;
219 sha_idx += sizeof(sha_text);
220 }
221
222 /*
223 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
224 * bytes are leftover from the last ksv, we might be able to fit them
225 * all in sha_text (first 2 cases), or we might need to split them up
226 * into 2 writes (last 2 cases).
227 */
228 if (sha_leftovers == 0) {
229 /* Write 16 bits of text, 16 bits of M0 */
230 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
231 ret = intel_write_sha_text(dev_priv,
232 bstatus[0] << 8 | bstatus[1]);
233 if (ret < 0)
234 return ret;
235 sha_idx += sizeof(sha_text);
236
237 /* Write 32 bits of M0 */
238 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
239 ret = intel_write_sha_text(dev_priv, 0);
240 if (ret < 0)
241 return ret;
242 sha_idx += sizeof(sha_text);
243
244 /* Write 16 bits of M0 */
245 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
246 ret = intel_write_sha_text(dev_priv, 0);
247 if (ret < 0)
248 return ret;
249 sha_idx += sizeof(sha_text);
250
251 } else if (sha_leftovers == 1) {
252 /* Write 24 bits of text, 8 bits of M0 */
253 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
254 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
255 /* Only 24-bits of data, must be in the LSB */
256 sha_text = (sha_text & 0xffffff00) >> 8;
257 ret = intel_write_sha_text(dev_priv, sha_text);
258 if (ret < 0)
259 return ret;
260 sha_idx += sizeof(sha_text);
261
262 /* Write 32 bits of M0 */
263 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
264 ret = intel_write_sha_text(dev_priv, 0);
265 if (ret < 0)
266 return ret;
267 sha_idx += sizeof(sha_text);
268
269 /* Write 24 bits of M0 */
270 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
271 ret = intel_write_sha_text(dev_priv, 0);
272 if (ret < 0)
273 return ret;
274 sha_idx += sizeof(sha_text);
275
276 } else if (sha_leftovers == 2) {
277 /* Write 32 bits of text */
278 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
279 sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
280 ret = intel_write_sha_text(dev_priv, sha_text);
281 if (ret < 0)
282 return ret;
283 sha_idx += sizeof(sha_text);
284
285 /* Write 64 bits of M0 */
286 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
287 for (i = 0; i < 2; i++) {
288 ret = intel_write_sha_text(dev_priv, 0);
289 if (ret < 0)
290 return ret;
291 sha_idx += sizeof(sha_text);
292 }
293 } else if (sha_leftovers == 3) {
294 /* Write 32 bits of text */
295 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
296 sha_text |= bstatus[0] << 24;
297 ret = intel_write_sha_text(dev_priv, sha_text);
298 if (ret < 0)
299 return ret;
300 sha_idx += sizeof(sha_text);
301
302 /* Write 8 bits of text, 24 bits of M0 */
303 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
304 ret = intel_write_sha_text(dev_priv, bstatus[1]);
305 if (ret < 0)
306 return ret;
307 sha_idx += sizeof(sha_text);
308
309 /* Write 32 bits of M0 */
310 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
311 ret = intel_write_sha_text(dev_priv, 0);
312 if (ret < 0)
313 return ret;
314 sha_idx += sizeof(sha_text);
315
316 /* Write 8 bits of M0 */
317 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
318 ret = intel_write_sha_text(dev_priv, 0);
319 if (ret < 0)
320 return ret;
321 sha_idx += sizeof(sha_text);
322 } else {
323 DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
324 return -EINVAL;
325 }
326
327 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
328 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
329 while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
330 ret = intel_write_sha_text(dev_priv, 0);
331 if (ret < 0)
332 return ret;
333 sha_idx += sizeof(sha_text);
334 }
335
336 /*
337 * Last write gets the length of the concatenation in bits. That is:
338 * - 5 bytes per device
339 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
340 */
341 sha_text = (num_downstream * 5 + 10) * 8;
342 ret = intel_write_sha_text(dev_priv, sha_text);
343 if (ret < 0)
344 return ret;
345
346 /* Tell the HW we're done with the hash and wait for it to ACK */
347 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
348 if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
349 HDCP_SHA1_COMPLETE,
350 HDCP_SHA1_COMPLETE, 1)) {
351 DRM_ERROR("Timed out waiting for SHA1 complete\n");
352 return -ETIMEDOUT;
353 }
354 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
355 DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
356 return -ENXIO;
357 }
358
359 DRM_INFO("HDCP is enabled (%d downstream devices)\n", num_downstream);
360 return 0;
361}
362
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, validate Bksv, verify R0/R0', and wait for link
 * encryption to engage. On success, hands off to
 * intel_hdcp_auth_downstream() for the repeater (Part 2) phase.
 *
 * Returns 0 on full success, otherwise a negative error code from the shim
 * or -ETIMEDOUT/-ENODEV on the various hardware/validation failures.
 */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
			   const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i;
	/*
	 * These unions let the same bytes be viewed both as the u32 register
	 * halves the hardware uses and as the byte stream the shim hooks
	 * read/write.
	 */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the acquired An and send it (with Aksv) to the sink */
	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* Timestamp Aksv transmission; R0' wait below is relative to this */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));
	ret = shim->read_bksv(intel_dig_port, bksv.shim);
	if (ret)
		return ret;
	else if (!intel_hdcp_is_ksv_valid(bksv.shim))
		return -ENODEV;

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	ri.reg = 0;
	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
	if (ret)
		return ret;
	I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

	/* Wait for Ri prime match */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
			  I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	return intel_hdcp_auth_downstream(intel_dig_port, shim);
}
479
480static
481struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
482{
483 return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
484}
485
/*
 * Tear down HDCP on the connector's port: disable the auth/encryption
 * engine, clear the loaded keys, and drop HDCP signalling on the link.
 *
 * Caller is expected to hold connector->hdcp_mutex (all callers in this
 * file do). Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	/* Writing 0 to CONF disables HDCP; wait for every status bit to drop */
	I915_WRITE(PORT_HDCP_CONF(port), 0);
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
				    20)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	intel_hdcp_clear_keys(dev_priv);

	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_INFO("HDCP is disabled\n");
	return 0;
}
511
512static int _intel_hdcp_enable(struct intel_connector *connector)
513{
514 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
515 int i, ret;
516
517 if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
518 DRM_ERROR("PG1 is disabled, cannot load keys\n");
519 return -ENXIO;
520 }
521
522 for (i = 0; i < KEY_LOAD_TRIES; i++) {
523 ret = intel_hdcp_load_keys(dev_priv);
524 if (!ret)
525 break;
526 intel_hdcp_clear_keys(dev_priv);
527 }
528 if (ret) {
529 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
530 return ret;
531 }
532
533 ret = intel_hdcp_auth(conn_to_dig_port(connector),
534 connector->hdcp_shim);
535 if (ret) {
536 DRM_ERROR("Failed to authenticate HDCP (%d)\n", ret);
537 return ret;
538 }
539
540 return 0;
541}
542
543static void intel_hdcp_check_work(struct work_struct *work)
544{
545 struct intel_connector *connector = container_of(to_delayed_work(work),
546 struct intel_connector,
547 hdcp_check_work);
548 if (!intel_hdcp_check_link(connector))
549 schedule_delayed_work(&connector->hdcp_check_work,
550 DRM_HDCP_CHECK_PERIOD_MS);
551}
552
/*
 * Worker that mirrors connector->hdcp_value into the connector state's
 * content_protection property, under both the modeset connection_mutex and
 * hdcp_mutex (taken in that order).
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_connector *connector = container_of(work,
							 struct intel_connector,
							 hdcp_prop_work);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&connector->hdcp_mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = connector->hdcp_value;
	}

	mutex_unlock(&connector->hdcp_mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
577
/*
 * One-time HDCP setup for a connector: attach the content-protection
 * property and initialize the shim pointer, mutex, and the link-check and
 * property workers.
 *
 * Returns 0 on success, or the property-attach error.
 */
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *hdcp_shim)
{
	int ret;

	ret = drm_connector_attach_content_protection_property(
			&connector->base);
	if (ret)
		return ret;

	connector->hdcp_shim = hdcp_shim;
	mutex_init(&connector->hdcp_mutex);
	INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
	INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
	return 0;
}
594
/*
 * Public entry point to enable HDCP on a connector. On success, marks the
 * content-protection property ENABLED (via the prop worker) and starts the
 * periodic link check.
 *
 * Returns -ENOENT if the connector has no HDCP shim, otherwise the result
 * of _intel_hdcp_enable().
 */
int intel_hdcp_enable(struct intel_connector *connector)
{
	int ret;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	ret = _intel_hdcp_enable(connector);
	if (ret)
		goto out;

	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
	schedule_work(&connector->hdcp_prop_work);
	schedule_delayed_work(&connector->hdcp_check_work,
			      DRM_HDCP_CHECK_PERIOD_MS);
out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}
616
/*
 * Public entry point to disable HDCP on a connector.
 *
 * hdcp_value is set to UNDESIRED before tearing down so any concurrent
 * workers observe the new state. The link-check work is cancelled only
 * after dropping hdcp_mutex, since intel_hdcp_check_link() (run by that
 * work) takes the same mutex.
 *
 * Returns -ENOENT if the connector has no HDCP shim, otherwise the result
 * of _intel_hdcp_disable().
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	int ret;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
	ret = _intel_hdcp_disable(connector);

	mutex_unlock(&connector->hdcp_mutex);
	cancel_delayed_work_sync(&connector->hdcp_check_work);
	return ret;
}
633
634void intel_hdcp_atomic_check(struct drm_connector *connector,
635 struct drm_connector_state *old_state,
636 struct drm_connector_state *new_state)
637{
638 uint64_t old_cp = old_state->content_protection;
639 uint64_t new_cp = new_state->content_protection;
640 struct drm_crtc_state *crtc_state;
641
642 if (!new_state->crtc) {
643 /*
644 * If the connector is being disabled with CP enabled, mark it
645 * desired so it's re-enabled when the connector is brought back
646 */
647 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
648 new_state->content_protection =
649 DRM_MODE_CONTENT_PROTECTION_DESIRED;
650 return;
651 }
652
653 /*
654 * Nothing to do if the state didn't change, or HDCP was activated since
655 * the last commit
656 */
657 if (old_cp == new_cp ||
658 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
659 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
660 return;
661
662 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
663 new_state->crtc);
664 crtc_state->mode_changed = true;
665}
666
/*
 * Implements Part 3 of the HDCP authorization procedure: verify the link is
 * still encrypted and (via the shim) still authenticated; on failure, tear
 * HDCP down and retry the full authentication. The content-protection
 * property is kept in sync via the prop worker.
 *
 * Returns 0 when the link is fine (or HDCP is UNDESIRED), a negative error
 * code otherwise.
 */
int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	/* Nothing to check if userspace no longer wants protection */
	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* Hardware says encryption dropped: report and flip to DESIRED */
	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
		DRM_ERROR("HDCP check failed: link is not encrypted, %x\n",
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	/* Shim says the link is still good: (re)assert ENABLED and leave */
	if (connector->hdcp_shim->check_link(intel_dig_port)) {
		if (connector->hdcp_value !=
		    DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			connector->hdcp_value =
				DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&connector->hdcp_prop_work);
		}
		goto out;
	}

	DRM_INFO("HDCP link failed, retrying authentication\n");

	/* Full teardown + re-auth; on any failure fall back to DESIRED */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}