drivers/gpu/drm/i915/display/intel_hdcp.c
/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */

#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>

#include "i915_reg.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
#include "intel_connector.h"

#define KEY_LOAD_TRIES	5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
#define HDCP2_LC_RETRY_CNT			3

static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;
	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;

	return true;
}

static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(dig_port, bksv);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv))
			break;
	}
	if (i == tries) {
		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
		return -ENODEV;
	}

	return 0;
}

/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	bool capable = false;
	u8 bksv[5];

	if (!shim)
		return capable;

	if (shim->hdcp_capable) {
		shim->hdcp_capable(dig_port, &capable);
	} else {
		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}

/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);

	return capable;
}

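/*
 * Helpers that report whether the hardware currently has HDCP1.4 / HDCP2.2
 * encryption enabled on the given transcoder/port, by sampling the
 * HDCP_STATUS / HDCP2_STATUS registers.
 */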
static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
	       HDCP_STATUS_ENC;
}

static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
			       enum transcoder cpu_transcoder, enum port port)
{
	return intel_de_read(dev_priv,
			     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	       LINK_ENCRYPTION_STATUS;
}

static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}

static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another requirement for hdcp key loadability is the enabled state of
	 * the pll for cdclk. Without an active crtc we won't land here, so we
	 * are assuming that cdclk is already on.
	 */

	return enabled;
}

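/*
 * Key management: intel_hdcp_clear_keys() wipes any previously loaded key
 * material, while intel_hdcp_load_keys() triggers the HDCP1.4 key load from
 * fuses (via the pcode mailbox on GEN9_BC, via HDCP_KEY_CONF elsewhere) and
 * then pushes Aksv to the PCH display for use in authentication.
 */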
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if the Key is not already loaded, it's an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}

static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}

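/*
 * Feed the repeater's KSV list, BINFO/BSTATUS and M0 through the hardware
 * SHA-1 engine and compare the result against the V' value read from the
 * receiver (second part of HDCP1.4 authentication).
 */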
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}

/* Implements Part 2 of the HDCP authorization procedure */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When the repeater reports 0 device count, the HDCP1.4 spec allows
	 * disabling the HDCP encryption. That implies that the repeater can't
	 * have its own display. As there is no consumption of encrypted
	 * content in a repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, the DP Spec mandates re-reading
	 * V prime at least twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}

/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * The DP HDCP Spec mandates two more attempts to read R0 in case
	 * of an R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}

static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}

static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* In case of authentication failures, the HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	if (hdcp->shim->check_link(dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}

static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}

bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
	return INTEL_INFO(dev_priv)->display.has_hdcp &&
		(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
}

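/*
 * The hdcp2_* helpers below forward HDCP2.2 protocol messages (AKE, LC, SKE,
 * repeater topology and session control) to the mei_hdcp component. Every
 * call takes hdcp_comp_mutex and bails out with -EINVAL if the component has
 * not been bound yet.
 */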
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
							 rx_cert, paired,
							 ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
							 rep_topology,
							 rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_close_mei_session(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->close_hdcp_session(comp->mei_dev,
					    &connector->hdcp.port_data);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}

/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}

static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}

static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp2_ske_send_eks send_eks;
	int ret;

	ret = hdcp2_prepare_skey(connector, &send_eks);
	if (ret < 0)
		return ret;

	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
					sizeof(send_eks));
	if (ret < 0)
		return ret;

	return 0;
}

static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	/* K no of streams is fixed as 1. Stored as big-endian. */
	msgs.stream_manage.k = cpu_to_be16(1);

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	msgs.stream_manage.streams[0].stream_id = 0;
	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		return ret;

	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
	hdcp->port_data.streams[0].stream_type = hdcp->content_type;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
	if (ret < 0)
		return ret;

	hdcp->seq_num_m++;

	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
		drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
		return -1;
	}

	return 0;
}

static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}

static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret;

	ret = hdcp2_authenticate_repeater_topology(connector);
	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}

49a630b0
R
1538static int hdcp2_authenticate_sink(struct intel_connector *connector)
1539{
7801f3b7 1540 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
51279100 1541 struct drm_i915_private *i915 = to_i915(connector->base.dev);
bd90d7c7
R
1542 struct intel_hdcp *hdcp = &connector->hdcp;
1543 const struct intel_hdcp_shim *shim = hdcp->shim;
1544 int ret;
49a630b0 1545
bd90d7c7
R
1546 ret = hdcp2_authentication_key_exchange(connector);
1547 if (ret < 0) {
51279100 1548 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
bd90d7c7
R
1549 return ret;
1550 }
1551
1552 ret = hdcp2_locality_check(connector);
1553 if (ret < 0) {
51279100
R
1554 drm_dbg_kms(&i915->drm,
1555 "Locality Check failed. Err : %d\n", ret);
bd90d7c7
R
1556 return ret;
1557 }
1558
1559 ret = hdcp2_session_key_exchange(connector);
1560 if (ret < 0) {
51279100 1561 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
bd90d7c7
R
1562 return ret;
1563 }
1564
1565 if (shim->config_stream_type) {
7801f3b7 1566 ret = shim->config_stream_type(dig_port,
bd90d7c7
R
1567 hdcp->is_repeater,
1568 hdcp->content_type);
1569 if (ret < 0)
1570 return ret;
1571 }
1572
d849178e
R
1573 if (hdcp->is_repeater) {
1574 ret = hdcp2_authenticate_repeater(connector);
1575 if (ret < 0) {
51279100
R
1576 drm_dbg_kms(&i915->drm,
1577 "Repeater Auth Failed. Err: %d\n", ret);
d849178e
R
1578 return ret;
1579 }
1580 }
1581
bd90d7c7
R
1582 hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1583 ret = hdcp2_authenticate_port(connector);
1584 if (ret < 0)
1585 return ret;
1586
1587 return ret;
49a630b0
R
1588}
1589
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}

static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}

static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}

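/*
 * Enable HDCP2.2 for the requested content type and record the result in
 * hdcp->hdcp2_encrypted so the link checker knows which version is active.
 */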
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

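/*
 * Disable HDCP2.2 encryption and deauthenticate the port;
 * hdcp->hdcp2_encrypted is cleared regardless of the outcome.
 */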
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;

	return ret;
}

/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}

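/*
 * Periodic link check worker: the HDCP2.2 check is tried first, falling
 * back to the HDCP1.4 check, and the work re-arms itself with the matching
 * check period while the link stays healthy.
 */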
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

	if (!intel_hdcp2_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}

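/*
 * Component framework glue: the MEI HDCP driver binds here and hands over
 * the i915_hdcp_comp_master it exposes, which the HDCP2.2 code then uses to
 * reach the ME firmware.
 */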
static int i915_hdcp_component_bind(struct device *i915_kdev,
				    struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
	dev_priv->hdcp_master->mei_dev = mei_kdev;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return 0;
}

static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}

static const struct component_ops i915_hdcp_component_ops = {
	.bind = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};

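/*
 * The ME firmware uses its own DDI and transcoder enumerations; the two
 * helpers below translate i915's enum port / enum transcoder values into
 * those firmware indices, returning the invalid index for anything that
 * cannot carry HDCP.
 */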
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
	switch (port) {
	case PORT_A:
		return MEI_DDI_A;
	case PORT_B ... PORT_F:
		return (enum mei_fw_ddi)port;
	default:
		return MEI_DDI_INVALID_PORT;
	}
}

static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
		return MEI_INVALID_TRANSCODER;
	}
}

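/*
 * Fill in the hdcp_port_data handed to the MEI firmware interface: the
 * firmware DDI index (pre-Gen12 only), port type, protocol and a single
 * stream carrying the requested content type.
 */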
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	if (INTEL_GEN(dev_priv) < 12)
		data->fw_ddi =
			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->fw_ddi = MEI_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here fw_tc
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->fw_tc = MEI_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	data->k = 1;
	if (!data->streams)
		data->streams = kcalloc(data->k,
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}

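/*
 * HDCP2.2 needs the MEI HDCP interface built in; beyond that, only the
 * platforms checked below have the required hardware support.
 */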
static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
		return false;

	return (INTEL_GEN(dev_priv) >= 10 ||
		IS_GEMINILAKE(dev_priv) ||
		IS_KABYLAKE(dev_priv) ||
		IS_COFFEELAKE(dev_priv) ||
		IS_COMETLAKE(dev_priv));
}

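/*
 * Register the typed I915_COMPONENT_HDCP component so the MEI HDCP driver
 * can bind to i915; on failure hdcp_comp_added is rolled back so the rest
 * of the driver knows HDCP2.2 services never came up.
 */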
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!is_hdcp2_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);

	dev_priv->hdcp_comp_added = true;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
				  I915_COMPONENT_HDCP);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
			    ret);
		mutex_lock(&dev_priv->hdcp_comp_mutex);
		dev_priv->hdcp_comp_added = false;
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}
}

static void intel_hdcp2_init(struct intel_connector *connector,
			     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}

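/*
 * intel_hdcp_init - set up HDCP state for a connector.
 *
 * Prepares HDCP2.2 support where possible, attaches the content protection
 * property and initializes the lock, workers and CP_IRQ wait queue. Callers
 * (typically the DP and HDMI connector init paths) supply the shim that
 * implements the transport specific operations.
 */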
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!shim)
		return -EINVAL;

	if (is_hdcp2_supported(dev_priv))
		intel_hdcp2_init(connector, shim);

	ret =
	drm_connector_attach_content_protection_property(&connector->base,
							  hdcp->hdcp2_supported);
	if (ret) {
		hdcp->hdcp2_supported = false;
		kfree(hdcp->port_data.streams);
		return ret;
	}

	hdcp->shim = shim;
	mutex_init(&hdcp->mutex);
	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
	init_waitqueue_head(&hdcp->cp_irq_queue);

	return 0;
}

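/*
 * intel_hdcp_enable - enable content protection on a connector.
 *
 * HDCP2.2 is tried first when both platform and sink are 2.2 capable, with
 * a fallback to HDCP1.4 unless Type 1 content was requested. On success the
 * link check worker is scheduled and the property moves to ENABLED.
 *
 * Illustrative call, as made from intel_hdcp_update_pipe() below:
 *
 *	intel_hdcp_enable(connector,
 *			  crtc_state->cpu_transcoder,
 *			  (u8)conn_state->hdcp_content_type);
 */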
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	if (INTEL_GEN(dev_priv) >= 12) {
		hdcp->cpu_transcoder = cpu_transcoder;
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
	}

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}

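/*
 * Tear down whichever HDCP version is currently encrypted, reset
 * hdcp->value to UNDESIRED and cancel any pending link check work.
 */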
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}

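/*
 * Reconciles the connector's uapi content protection and content type
 * request with the current HDCP state during a commit: HDCP is disabled on
 * UNDESIRED or on a type change, and (re)enabled when protection is desired
 * but not yet active.
 */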
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
	bool desired_and_not_enabled = false;

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}

void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}

	dev_priv->hdcp_comp_added = false;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}

void intel_hdcp_cleanup(struct intel_connector *connector)
{
	if (!connector->hdcp.shim)
		return;

	mutex_lock(&connector->hdcp.mutex);
	kfree(connector->hdcp.port_data.streams);
	mutex_unlock(&connector->hdcp.mutex);
}

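/*
 * Atomic check helper: downgrade ENABLED to DESIRED when the connector or
 * its crtc is being reconfigured, and force a modeset whenever the content
 * protection or content type request actually changes.
 */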
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	     new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
		    new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}

/* Handles the CP_IRQ raised from the DP HDCP sink */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	schedule_delayed_work(&hdcp->check_work, 0);
}