Commit | Line | Data |
---|---|---|
ee5e5e7a SP |
1 | /* SPDX-License-Identifier: MIT */ |
2 | /* | |
3 | * Copyright (C) 2017 Google, Inc. | |
4 | * | |
5 | * Authors: | |
6 | * Sean Paul <seanpaul@chromium.org> | |
7 | */ | |
8 | ||
408bd917 | 9 | #include <linux/component.h> |
ee5e5e7a SP |
10 | #include <linux/i2c.h> |
11 | #include <linux/random.h> | |
12 | ||
408bd917 JN |
13 | #include <drm/drm_hdcp.h> |
14 | #include <drm/i915_component.h> | |
15 | ||
ee5e5e7a | 16 | #include "i915_reg.h" |
408bd917 JN |
17 | #include "intel_drv.h" |
18 | #include "intel_hdcp.h" | |
e0516e83 | 19 | #include "intel_sideband.h" |
ee5e5e7a SP |
20 | |
21 | #define KEY_LOAD_TRIES 5 | |
7e90e8d0 | 22 | #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 |
bd90d7c7 | 23 | #define HDCP2_LC_RETRY_CNT 3 |
ee5e5e7a | 24 | |
f106d100 R |
25 | static |
26 | bool intel_hdcp_is_ksv_valid(u8 *ksv) | |
27 | { | |
28 | int i, ones = 0; | |
29 | /* KSV has 20 1's and 20 0's */ | |
30 | for (i = 0; i < DRM_HDCP_KSV_LEN; i++) | |
31 | ones += hweight8(ksv[i]); | |
32 | if (ones != 20) | |
33 | return false; | |
34 | ||
35 | return true; | |
36 | } | |
37 | ||
38 | static | |
39 | int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, | |
40 | const struct intel_hdcp_shim *shim, u8 *bksv) | |
41 | { | |
42 | int ret, i, tries = 2; | |
43 | ||
44 | /* HDCP spec states that we must retry the bksv if it is invalid */ | |
45 | for (i = 0; i < tries; i++) { | |
46 | ret = shim->read_bksv(intel_dig_port, bksv); | |
47 | if (ret) | |
48 | return ret; | |
49 | if (intel_hdcp_is_ksv_valid(bksv)) | |
50 | break; | |
51 | } | |
52 | if (i == tries) { | |
3aae21fc | 53 | DRM_DEBUG_KMS("Bksv is invalid\n"); |
f106d100 R |
54 | return -ENODEV; |
55 | } | |
56 | ||
57 | return 0; | |
58 | } | |
59 | ||
bdc93fe0 R |
60 | /* Is HDCP1.4 capable on Platform and Sink */ |
61 | bool intel_hdcp_capable(struct intel_connector *connector) | |
62 | { | |
63 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); | |
d3dacc70 | 64 | const struct intel_hdcp_shim *shim = connector->hdcp.shim; |
bdc93fe0 R |
65 | bool capable = false; |
66 | u8 bksv[5]; | |
67 | ||
68 | if (!shim) | |
69 | return capable; | |
70 | ||
71 | if (shim->hdcp_capable) { | |
72 | shim->hdcp_capable(intel_dig_port, &capable); | |
73 | } else { | |
74 | if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv)) | |
75 | capable = true; | |
76 | } | |
77 | ||
78 | return capable; | |
79 | } | |
80 | ||
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/*
	 * Sink's capability for HDCP2.2.  NOTE(review): the shim call's
	 * return value is ignored; on failure 'capable' keeps its initial
	 * false value, which appears intentional (fail closed) — confirm.
	 */
	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);

	return capable;
}
106 | ||
09d56393 R |
107 | static inline bool intel_hdcp_in_use(struct intel_connector *connector) |
108 | { | |
109 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
110 | enum port port = connector->encoder->port; | |
111 | u32 reg; | |
112 | ||
113 | reg = I915_READ(PORT_HDCP_STATUS(port)); | |
114 | return reg & HDCP_STATUS_ENC; | |
115 | } | |
116 | ||
22ce2d94 R |
117 | static inline bool intel_hdcp2_in_use(struct intel_connector *connector) |
118 | { | |
119 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
120 | enum port port = connector->encoder->port; | |
121 | u32 reg; | |
122 | ||
123 | reg = I915_READ(HDCP2_STATUS_DDI(port)); | |
124 | return reg & LINK_ENCRYPTION_STATUS; | |
125 | } | |
126 | ||
/*
 * Wait for the repeater's KSV list to become ready.
 *
 * Polls shim->read_ksv_ready() for up to 5 seconds (the HDCP spec's
 * upper bound for a repeater to assemble its KSV list), with the
 * __wait_for() backoff between 1ms and 100ms per iteration.
 * Returns 0 when ready, the shim's error if the read itself failed,
 * or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The poll body ran but the DPCD/DDC read itself errored out. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
147 | ||
/*
 * Check whether the HDCP key can currently be loaded by SW.
 *
 * Key loading requires the relevant display power well to be enabled:
 * the global display power well on HSW/BDW, power well #1 on BXT+.
 * Returns true iff that power well reports enabled.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
184 | ||
/*
 * Clear the loaded HDCP keys and reset HDCP_KEY_STATUS so a fresh key
 * load can be retried.  The status write sets every load/fuse bit —
 * presumably these are write-1-to-clear; confirm against Bspec.
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
191 | ||
/*
 * Load the HDCP1.4 key into display HW.
 *
 * Fast path: if the key is already loaded and validated, return 0.
 * On HSW/BDW the key is loaded by HW when the display comes out of
 * reset, so a missing key there is a hard error (-ENXIO).  On GEN9_BC
 * the load is triggered through the GT Driver Mailbox (pcode); on
 * other platforms via HDCP_KEY_CONF.  After a successful load the
 * Aksv is handed to the PCH display for use in authentication.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Key already loaded and validated — nothing to do. */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
242 | ||
243 | /* Returns updated SHA-1 index */ | |
244 | static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) | |
245 | { | |
246 | I915_WRITE(HDCP_SHA_TEXT, sha_text); | |
97a04e0d | 247 | if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, |
ee5e5e7a SP |
248 | HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) { |
249 | DRM_ERROR("Timed out waiting for SHA1 ready\n"); | |
250 | return -ETIMEDOUT; | |
251 | } | |
252 | return 0; | |
253 | } | |
254 | ||
/*
 * Map a DDI port to its HDCP_REP_CTL repeater-present / SHA1-M0 select
 * bits, used when programming the repeater SHA-1 computation.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;
	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		break;
	}
	DRM_ERROR("Unknown port %d\n", port);
	/*
	 * NOTE(review): -EINVAL returned from a u32-returning function
	 * becomes 0xffffffea, and callers (e.g. the REP_CTL writes in the
	 * auth paths) use the value without an error check.  Worth
	 * auditing whether an unknown port can actually reach here.
	 */
	return -EINVAL;
}
276 | ||
/*
 * Validate the repeater's V' against a HW-computed SHA-1 over the
 * downstream KSV list.
 *
 * The receiver's V' parts are loaded into HDCP_SHA_V_PRIME, then the
 * byte stream ksv_fifo || BSTATUS/BINFO || M0 is fed 32 bits at a time
 * through HDCP_SHA_TEXT.  M0 is never visible to SW; HW splices it in
 * according to the HDCP_SHA1_TEXT_* select programmed in HDCP_REP_CTL,
 * so this code only tracks how many leftover KSV bytes straddle each
 * 32-bit write.  Returns 0 on V match, -ENXIO on mismatch, other
 * negative codes on read/timeout errors.
 */
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct drm_i915_private *dev_priv;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 *
	 * NOTE(review): the HDCP_SHA1_TEXT_* select encodes how many TEXT
	 * bits the next write carries (HW fills the remainder with M0),
	 * e.g. TEXT_8 = 8 bits text + 24 bits M0.  Verify each case
	 * against Bspec before modifying this sequence.
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
				    HDCP_SHA1_COMPLETE,
				    HDCP_SHA1_COMPLETE, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	/* HW compares its SHA-1 result against the V' loaded earlier. */
	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
491 | ||
/*
 * Implements Part 2 of the HDCP authorization procedure (repeater
 * authentication): wait for the KSV FIFO, validate topology limits,
 * read and revocation-check the downstream KSV list, then verify V'
 * (with the DP-mandated retries).  Returns 0 on success.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	struct drm_device *dev = connector->base.dev;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	/* Spec-defined device/cascade depth limits must not be exceeded. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0)
		return -EINVAL;

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo)
		return -ENOMEM;

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation failure code. */
		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
		      num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
567 | ||
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, read and revocation-check Bksv, start
 * authentication + encryption, verify R0/R0' match, and wait for
 * encryption to engage.  Hands off to intel_hdcp_auth_downstream()
 * when the sink reports itself as a repeater.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let HW register pairs and shim byte buffers share storage. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait below is measured from the moment Aksv was sent. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Reject a sink whose Bksv appears in the revocation list (SRM). */
	if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
		DRM_ERROR("BKSV is revoked\n");
		return -EPERM;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
			      I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
723 | ||
/*
 * Turn off HDCP1.4 encryption on the connector's port: clear the
 * bookkeeping flag, disable the HW config, wait for the status
 * register to fully clear, then stop HDCP signalling via the shim.
 * Returns 0 on success, -ETIMEDOUT or the shim's error otherwise.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
		      connector->base.name, connector->base.base.id);

	hdcp->hdcp_encrypted = false;
	I915_WRITE(PORT_HDCP_CONF(port), 0);
	/* Wait for every status bit to drop (mask ~0, expected value 0). */
	if (intel_wait_for_register(&dev_priv->uncore,
				    PORT_HDCP_STATUS(port), ~0, 0,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_DEBUG_KMS("HDCP is disabled\n");
	return 0;
}
753 | ||
/*
 * Enable HDCP1.4 on the connector: load the HDCP key (retrying up to
 * KEY_LOAD_TRIES times, clearing keys between attempts), then run the
 * authentication procedure up to 3 times as the spec allows re-auth on
 * failure.  Sets hdcp->hdcp_encrypted on success.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* ret is always assigned here since KEY_LOAD_TRIES > 0. */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
796 | ||
/* Recover the intel_connector that embeds the given intel_hdcp state. */
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
802 | ||
/*
 * Implements Part 3 of the HDCP authorization procedure (link
 * integrity check).  Under hdcp->mutex: verifies HW encryption is
 * still active, asks the shim whether the link is intact, and on
 * failure tears encryption down and re-runs authentication, updating
 * the content-protection property via prop_work as needed.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	mutex_lock(&hdcp->mutex);

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* SW thinks we're encrypted but HW disagrees — unexpected state. */
	if (WARN_ON(!intel_hdcp_in_use(connector))) {
		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	/* Non-zero from check_link means the link is still good. */
	if (hdcp->shim->check_link(intel_dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
862 | ||
ee5e5e7a SP |
/*
 * Deferred update of the connector's content-protection property.
 * Runs from a worker because the property can only be touched under
 * connection_mutex, which the HDCP paths cannot take directly.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	/* Lock order: connection_mutex first, then hdcp->mutex. */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = hdcp->value;
	}

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
887 | ||
fdddd08c R |
888 | bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) |
889 | { | |
890 | /* PORT E doesn't have HDCP, and PORT F is disabled */ | |
a0ea697a | 891 | return INTEL_GEN(dev_priv) >= 9 && port < PORT_E; |
fdddd08c R |
892 | } |
893 | ||
bd90d7c7 | 894 | static int |
9055aac7 R |
895 | hdcp2_prepare_ake_init(struct intel_connector *connector, |
896 | struct hdcp2_ake_init *ake_data) | |
897 | { | |
898 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
899 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
900 | struct i915_hdcp_comp_master *comp; | |
901 | int ret; | |
902 | ||
903 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
904 | comp = dev_priv->hdcp_master; | |
905 | ||
906 | if (!comp || !comp->ops) { | |
907 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
908 | return -EINVAL; | |
909 | } | |
910 | ||
911 | ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); | |
912 | if (ret) | |
913 | DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret); | |
914 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
915 | ||
916 | return ret; | |
917 | } | |
918 | ||
bd90d7c7 | 919 | static int |
9055aac7 R |
920 | hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, |
921 | struct hdcp2_ake_send_cert *rx_cert, | |
922 | bool *paired, | |
923 | struct hdcp2_ake_no_stored_km *ek_pub_km, | |
924 | size_t *msg_sz) | |
925 | { | |
926 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
927 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
928 | struct i915_hdcp_comp_master *comp; | |
929 | int ret; | |
930 | ||
931 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
932 | comp = dev_priv->hdcp_master; | |
933 | ||
934 | if (!comp || !comp->ops) { | |
935 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
936 | return -EINVAL; | |
937 | } | |
938 | ||
939 | ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, | |
940 | rx_cert, paired, | |
941 | ek_pub_km, msg_sz); | |
942 | if (ret < 0) | |
943 | DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret); | |
944 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
945 | ||
946 | return ret; | |
947 | } | |
948 | ||
bd90d7c7 R |
949 | static int hdcp2_verify_hprime(struct intel_connector *connector, |
950 | struct hdcp2_ake_send_hprime *rx_hprime) | |
9055aac7 R |
951 | { |
952 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
953 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
954 | struct i915_hdcp_comp_master *comp; | |
955 | int ret; | |
956 | ||
957 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
958 | comp = dev_priv->hdcp_master; | |
959 | ||
960 | if (!comp || !comp->ops) { | |
961 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
962 | return -EINVAL; | |
963 | } | |
964 | ||
965 | ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); | |
966 | if (ret < 0) | |
967 | DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret); | |
968 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
969 | ||
970 | return ret; | |
971 | } | |
972 | ||
bd90d7c7 | 973 | static int |
9055aac7 R |
974 | hdcp2_store_pairing_info(struct intel_connector *connector, |
975 | struct hdcp2_ake_send_pairing_info *pairing_info) | |
976 | { | |
977 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
978 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
979 | struct i915_hdcp_comp_master *comp; | |
980 | int ret; | |
981 | ||
982 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
983 | comp = dev_priv->hdcp_master; | |
984 | ||
985 | if (!comp || !comp->ops) { | |
986 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
987 | return -EINVAL; | |
988 | } | |
989 | ||
990 | ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); | |
991 | if (ret < 0) | |
992 | DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret); | |
993 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
994 | ||
995 | return ret; | |
996 | } | |
997 | ||
bd90d7c7 | 998 | static int |
9055aac7 R |
999 | hdcp2_prepare_lc_init(struct intel_connector *connector, |
1000 | struct hdcp2_lc_init *lc_init) | |
1001 | { | |
1002 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1003 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1004 | struct i915_hdcp_comp_master *comp; | |
1005 | int ret; | |
1006 | ||
1007 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1008 | comp = dev_priv->hdcp_master; | |
1009 | ||
1010 | if (!comp || !comp->ops) { | |
1011 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1012 | return -EINVAL; | |
1013 | } | |
1014 | ||
1015 | ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); | |
1016 | if (ret < 0) | |
1017 | DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret); | |
1018 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1019 | ||
1020 | return ret; | |
1021 | } | |
1022 | ||
bd90d7c7 | 1023 | static int |
9055aac7 R |
1024 | hdcp2_verify_lprime(struct intel_connector *connector, |
1025 | struct hdcp2_lc_send_lprime *rx_lprime) | |
1026 | { | |
1027 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1028 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1029 | struct i915_hdcp_comp_master *comp; | |
1030 | int ret; | |
1031 | ||
1032 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1033 | comp = dev_priv->hdcp_master; | |
1034 | ||
1035 | if (!comp || !comp->ops) { | |
1036 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1037 | return -EINVAL; | |
1038 | } | |
1039 | ||
1040 | ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); | |
1041 | if (ret < 0) | |
1042 | DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret); | |
1043 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1044 | ||
1045 | return ret; | |
1046 | } | |
1047 | ||
bd90d7c7 R |
1048 | static int hdcp2_prepare_skey(struct intel_connector *connector, |
1049 | struct hdcp2_ske_send_eks *ske_data) | |
9055aac7 R |
1050 | { |
1051 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1052 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1053 | struct i915_hdcp_comp_master *comp; | |
1054 | int ret; | |
1055 | ||
1056 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1057 | comp = dev_priv->hdcp_master; | |
1058 | ||
1059 | if (!comp || !comp->ops) { | |
1060 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1061 | return -EINVAL; | |
1062 | } | |
1063 | ||
1064 | ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); | |
1065 | if (ret < 0) | |
1066 | DRM_DEBUG_KMS("Get session key failed. %d\n", ret); | |
1067 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1068 | ||
1069 | return ret; | |
1070 | } | |
1071 | ||
d849178e | 1072 | static int |
9055aac7 R |
1073 | hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, |
1074 | struct hdcp2_rep_send_receiverid_list | |
1075 | *rep_topology, | |
1076 | struct hdcp2_rep_send_ack *rep_send_ack) | |
1077 | { | |
1078 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1079 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1080 | struct i915_hdcp_comp_master *comp; | |
1081 | int ret; | |
1082 | ||
1083 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1084 | comp = dev_priv->hdcp_master; | |
1085 | ||
1086 | if (!comp || !comp->ops) { | |
1087 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1088 | return -EINVAL; | |
1089 | } | |
1090 | ||
1091 | ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, | |
1092 | rep_topology, | |
1093 | rep_send_ack); | |
1094 | if (ret < 0) | |
1095 | DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret); | |
1096 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1097 | ||
1098 | return ret; | |
1099 | } | |
1100 | ||
d849178e | 1101 | static int |
9055aac7 R |
1102 | hdcp2_verify_mprime(struct intel_connector *connector, |
1103 | struct hdcp2_rep_stream_ready *stream_ready) | |
1104 | { | |
1105 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1106 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1107 | struct i915_hdcp_comp_master *comp; | |
1108 | int ret; | |
1109 | ||
1110 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1111 | comp = dev_priv->hdcp_master; | |
1112 | ||
1113 | if (!comp || !comp->ops) { | |
1114 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1115 | return -EINVAL; | |
1116 | } | |
1117 | ||
1118 | ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); | |
1119 | if (ret < 0) | |
1120 | DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret); | |
1121 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1122 | ||
1123 | return ret; | |
1124 | } | |
1125 | ||
bd90d7c7 | 1126 | static int hdcp2_authenticate_port(struct intel_connector *connector) |
9055aac7 R |
1127 | { |
1128 | struct hdcp_port_data *data = &connector->hdcp.port_data; | |
1129 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1130 | struct i915_hdcp_comp_master *comp; | |
1131 | int ret; | |
1132 | ||
1133 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1134 | comp = dev_priv->hdcp_master; | |
1135 | ||
1136 | if (!comp || !comp->ops) { | |
1137 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1138 | return -EINVAL; | |
1139 | } | |
1140 | ||
1141 | ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); | |
1142 | if (ret < 0) | |
1143 | DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret); | |
1144 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1145 | ||
1146 | return ret; | |
1147 | } | |
1148 | ||
49a630b0 | 1149 | static int hdcp2_close_mei_session(struct intel_connector *connector) |
9055aac7 R |
1150 | { |
1151 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | |
1152 | struct i915_hdcp_comp_master *comp; | |
1153 | int ret; | |
1154 | ||
1155 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1156 | comp = dev_priv->hdcp_master; | |
1157 | ||
1158 | if (!comp || !comp->ops) { | |
1159 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1160 | return -EINVAL; | |
1161 | } | |
1162 | ||
1163 | ret = comp->ops->close_hdcp_session(comp->mei_dev, | |
1164 | &connector->hdcp.port_data); | |
1165 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1166 | ||
1167 | return ret; | |
1168 | } | |
1169 | ||
/* Deauthenticate the port: for this HW that is just closing the mei session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1174 | ||
/* Authentication flow starts from here */
/*
 * HDCP2.2 AKE: exchange certificates and the master key with the sink via
 * the shim's 2.2 message channel, with crypto delegated to the mei component.
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	/* All AKE messages are mutually exclusive in time: share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* Sink must advertise the HDCP2.2 version in its RxCaps. */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
		return -EINVAL;

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
					1)) {
		DRM_ERROR("Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* size differs for stored vs no-stored km, hence the explicit size. */
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1261 | ||
/*
 * HDCP2.2 locality check: prove the sink is locally attached by exchanging
 * LC_Init/LC_Send_L_prime, retrying up to HDCP2_LC_RETRY_CNT times.
 * Returns the last attempt's status (0 on success).
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;	/* any step failing restarts the attempt */

		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1297 | ||
1298 | static int hdcp2_session_key_exchange(struct intel_connector *connector) | |
1299 | { | |
1300 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); | |
1301 | struct intel_hdcp *hdcp = &connector->hdcp; | |
1302 | struct hdcp2_ske_send_eks send_eks; | |
1303 | int ret; | |
1304 | ||
1305 | ret = hdcp2_prepare_skey(connector, &send_eks); | |
1306 | if (ret < 0) | |
1307 | return ret; | |
1308 | ||
1309 | ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, | |
1310 | sizeof(send_eks)); | |
1311 | if (ret < 0) | |
1312 | return ret; | |
1313 | ||
1314 | return 0; | |
1315 | } | |
1316 | ||
/*
 * Repeater stream management: send RepeaterAuth_Stream_Manage for the single
 * stream, read Stream_Ready back and have the mei component verify M'.
 * Advances seq_num_m; a roll-over past HDCP_2_2_SEQ_NUM_MAX fails the link.
 */
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	/* K no of streams is fixed as 1. Stored as big-endian. */
	msgs.stream_manage.k = cpu_to_be16(1);

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	msgs.stream_manage.streams[0].stream_id = 0;
	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

	/* Send it to Repeater */
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		return ret;

	/* Mirror the values the mei component needs for M' verification. */
	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
	hdcp->port_data.streams[0].stream_type = hdcp->content_type;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
	if (ret < 0)
		return ret;

	hdcp->seq_num_m++;

	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
		DRM_DEBUG_KMS("seq_num_m roll over.\n");
		return -1;
	}

	return 0;
}
1367 | ||
/*
 * Repeater topology auth: read the downstream ReceiverID list, sanity-check
 * size/seq_num_v, reject revoked IDs, then let the mei component validate
 * the list and produce the ack we send back to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Topology limits from RxInfo: cascade depth and device count caps. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across two RxInfo bytes: HI nibble << 4 | LO. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
					device_cnt)) {
		DRM_ERROR("Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit the new seq_num_v once the topology verified cleanly. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1428 | ||
/* Full repeater auth: topology check first, then stream management info. */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret;

	ret = hdcp2_authenticate_repeater_topology(connector);
	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1439 | ||
49a630b0 R |
1440 | static int hdcp2_authenticate_sink(struct intel_connector *connector) |
1441 | { | |
bd90d7c7 R |
1442 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); |
1443 | struct intel_hdcp *hdcp = &connector->hdcp; | |
1444 | const struct intel_hdcp_shim *shim = hdcp->shim; | |
1445 | int ret; | |
49a630b0 | 1446 | |
bd90d7c7 R |
1447 | ret = hdcp2_authentication_key_exchange(connector); |
1448 | if (ret < 0) { | |
1449 | DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret); | |
1450 | return ret; | |
1451 | } | |
1452 | ||
1453 | ret = hdcp2_locality_check(connector); | |
1454 | if (ret < 0) { | |
1455 | DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret); | |
1456 | return ret; | |
1457 | } | |
1458 | ||
1459 | ret = hdcp2_session_key_exchange(connector); | |
1460 | if (ret < 0) { | |
1461 | DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret); | |
1462 | return ret; | |
1463 | } | |
1464 | ||
1465 | if (shim->config_stream_type) { | |
1466 | ret = shim->config_stream_type(intel_dig_port, | |
1467 | hdcp->is_repeater, | |
1468 | hdcp->content_type); | |
1469 | if (ret < 0) | |
1470 | return ret; | |
1471 | } | |
1472 | ||
d849178e R |
1473 | if (hdcp->is_repeater) { |
1474 | ret = hdcp2_authenticate_repeater(connector); | |
1475 | if (ret < 0) { | |
1476 | DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret); | |
1477 | return ret; | |
1478 | } | |
1479 | } | |
1480 | ||
bd90d7c7 R |
1481 | hdcp->port_data.streams[0].stream_type = hdcp->content_type; |
1482 | ret = hdcp2_authenticate_port(connector); | |
1483 | if (ret < 0) | |
1484 | return ret; | |
1485 | ||
1486 | return ret; | |
49a630b0 R |
1487 | } |
1488 | ||
/*
 * Turn on HDCP2.2 link encryption after successful authentication:
 * enable signalling, request encryption via HDCP2_CTL, and wait for the
 * status register to report the link encrypted.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret;

	/* Encryption must not already be on when we get here. */
	WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
		if (ret) {
			DRM_ERROR("Failed to enable HDCP signalling. %d\n",
				  ret);
			return ret;
		}
	}

	if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		I915_WRITE(HDCP2_CTL_DDI(port),
			   I915_READ(HDCP2_CTL_DDI(port)) |
			   CTL_LINK_ENCRYPTION_REQ);
	}

	/* Wait for HW to report encryption active (bounded by timeout). */
	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS,
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1522 | ||
/*
 * Turn off HDCP2.2 link encryption: clear the encryption request, wait
 * for the status bit to drop, then disable signalling.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret;

	/* Encryption is expected to be on when we get here. */
	WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));

	I915_WRITE(HDCP2_CTL_DDI(port),
		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS, 0x0,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		DRM_DEBUG_KMS("Disable Encryption Timedout");

	/* Signalling is torn down even if the status wait timed out. */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
		if (ret) {
			DRM_ERROR("Failed to disable HDCP signalling. %d\n",
				  ret);
			return ret;
		}
	}

	return ret;
}
1553 | ||
/*
 * Authenticate the sink (up to 3 attempts, clearing the mei session between
 * failures) and, on success, enable encryption after the spec-mandated
 * post-SKE delay.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
			      i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			DRM_DEBUG_KMS("Port deauth failed.\n");
	}

	/* i != tries means one of the attempts authenticated successfully. */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				DRM_DEBUG_KMS("Port deauth failed.\n");
		}
	}

	return ret;
}
1586 | ||
1587 | static int _intel_hdcp2_enable(struct intel_connector *connector) | |
1588 | { | |
1589 | struct intel_hdcp *hdcp = &connector->hdcp; | |
1590 | int ret; | |
1591 | ||
1592 | DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n", | |
1593 | connector->base.name, connector->base.base.id, | |
1594 | hdcp->content_type); | |
1595 | ||
1596 | ret = hdcp2_authenticate_and_encrypt(connector); | |
1597 | if (ret) { | |
1598 | DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n", | |
1599 | hdcp->content_type, ret); | |
1600 | return ret; | |
1601 | } | |
1602 | ||
1603 | DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n", | |
1604 | connector->base.name, connector->base.base.id, | |
1605 | hdcp->content_type); | |
1606 | ||
1607 | hdcp->hdcp2_encrypted = true; | |
1608 | return 0; | |
1609 | } | |
1610 | ||
1611 | static int _intel_hdcp2_disable(struct intel_connector *connector) | |
1612 | { | |
1613 | int ret; | |
1614 | ||
1615 | DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n", | |
1616 | connector->base.name, connector->base.base.id); | |
1617 | ||
1618 | ret = hdcp2_disable_encryption(connector); | |
1619 | ||
1620 | if (hdcp2_deauthenticate_port(connector) < 0) | |
1621 | DRM_DEBUG_KMS("Port deauth failed.\n"); | |
1622 | ||
1623 | connector->hdcp.hdcp2_encrypted = false; | |
1624 | ||
1625 | return ret; | |
1626 | } | |
1627 | ||
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret = 0;

	mutex_lock(&hdcp->mutex);

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW dropped encryption behind our back: report loss to userspace. */
	if (WARN_ON(!intel_hdcp2_in_use(connector))) {
		DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
			  I915_READ(HDCP2_STATUS_DDI(port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(intel_dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		/* Link still good: (re)assert the ENABLED property. */
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		/* Downstream devices changed: re-auth just the topology. */
		DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
			      connector->base.name, connector->base.base.id,
			      ret);
	} else {
		DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
			      connector->base.name, connector->base.base.id);
	}

	/* Fall through to full disable/re-enable cycle. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			  connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			      connector->base.name, connector->base.base.id,
			      ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1706 | ||
/*
 * Periodic link-check worker. Tries the HDCP2.2 check first; only if that
 * reports 2.2 is not in use/healthy does it fall back to the 1.4 check.
 * Re-arms itself with the period matching whichever protocol is live.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

	if (!intel_hdcp2_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}
1721 | ||
9055aac7 R |
1722 | static int i915_hdcp_component_bind(struct device *i915_kdev, |
1723 | struct device *mei_kdev, void *data) | |
1724 | { | |
1725 | struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); | |
1726 | ||
1727 | DRM_DEBUG("I915 HDCP comp bind\n"); | |
1728 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1729 | dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; | |
1730 | dev_priv->hdcp_master->mei_dev = mei_kdev; | |
1731 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1732 | ||
1733 | return 0; | |
1734 | } | |
1735 | ||
1736 | static void i915_hdcp_component_unbind(struct device *i915_kdev, | |
1737 | struct device *mei_kdev, void *data) | |
1738 | { | |
1739 | struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); | |
1740 | ||
1741 | DRM_DEBUG("I915 HDCP comp unbind\n"); | |
1742 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1743 | dev_priv->hdcp_master = NULL; | |
1744 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1745 | } | |
1746 | ||
/* Ops registered with the component framework for the i915/MEI HDCP pair. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1751 | ||
1752 | static inline int initialize_hdcp_port_data(struct intel_connector *connector) | |
1753 | { | |
1754 | struct intel_hdcp *hdcp = &connector->hdcp; | |
1755 | struct hdcp_port_data *data = &hdcp->port_data; | |
1756 | ||
1757 | data->port = connector->encoder->port; | |
1758 | data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; | |
1759 | data->protocol = (u8)hdcp->shim->protocol; | |
1760 | ||
1761 | data->k = 1; | |
1762 | if (!data->streams) | |
1763 | data->streams = kcalloc(data->k, | |
1764 | sizeof(struct hdcp2_streamid_type), | |
1765 | GFP_KERNEL); | |
1766 | if (!data->streams) { | |
1767 | DRM_ERROR("Out of Memory\n"); | |
1768 | return -ENOMEM; | |
1769 | } | |
1770 | ||
1771 | data->streams[0].stream_id = 0; | |
1772 | data->streams[0].stream_type = hdcp->content_type; | |
1773 | ||
1774 | return 0; | |
1775 | } | |
1776 | ||
04707f97 R |
1777 | static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) |
1778 | { | |
1779 | if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) | |
1780 | return false; | |
1781 | ||
1782 | return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || | |
1783 | IS_KABYLAKE(dev_priv)); | |
1784 | } | |
1785 | ||
9055aac7 R |
1786 | void intel_hdcp_component_init(struct drm_i915_private *dev_priv) |
1787 | { | |
1788 | int ret; | |
1789 | ||
1790 | if (!is_hdcp2_supported(dev_priv)) | |
1791 | return; | |
1792 | ||
1793 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1794 | WARN_ON(dev_priv->hdcp_comp_added); | |
1795 | ||
1796 | dev_priv->hdcp_comp_added = true; | |
1797 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1798 | ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, | |
1799 | I915_COMPONENT_HDCP); | |
1800 | if (ret < 0) { | |
1801 | DRM_DEBUG_KMS("Failed at component add(%d)\n", ret); | |
1802 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1803 | dev_priv->hdcp_comp_added = false; | |
1804 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1805 | return; | |
1806 | } | |
1807 | } | |
1808 | ||
04707f97 R |
1809 | static void intel_hdcp2_init(struct intel_connector *connector) |
1810 | { | |
1811 | struct intel_hdcp *hdcp = &connector->hdcp; | |
9055aac7 R |
1812 | int ret; |
1813 | ||
1814 | ret = initialize_hdcp_port_data(connector); | |
1815 | if (ret) { | |
1816 | DRM_DEBUG_KMS("Mei hdcp data init failed\n"); | |
1817 | return; | |
1818 | } | |
04707f97 | 1819 | |
04707f97 R |
1820 | hdcp->hdcp2_supported = true; |
1821 | } | |
1822 | ||
ee5e5e7a | 1823 | int intel_hdcp_init(struct intel_connector *connector, |
d3dacc70 | 1824 | const struct intel_hdcp_shim *shim) |
ee5e5e7a | 1825 | { |
04707f97 | 1826 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
d3dacc70 | 1827 | struct intel_hdcp *hdcp = &connector->hdcp; |
ee5e5e7a SP |
1828 | int ret; |
1829 | ||
04707f97 R |
1830 | if (!shim) |
1831 | return -EINVAL; | |
1832 | ||
1833 | ret = drm_connector_attach_content_protection_property(&connector->base); | |
ee5e5e7a SP |
1834 | if (ret) |
1835 | return ret; | |
1836 | ||
d3dacc70 R |
1837 | hdcp->shim = shim; |
1838 | mutex_init(&hdcp->mutex); | |
1839 | INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); | |
1840 | INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); | |
04707f97 R |
1841 | |
1842 | if (is_hdcp2_supported(dev_priv)) | |
1843 | intel_hdcp2_init(connector); | |
cf9cb35f | 1844 | init_waitqueue_head(&hdcp->cp_irq_queue); |
04707f97 | 1845 | |
ee5e5e7a SP |
1846 | return 0; |
1847 | } | |
1848 | ||
1849 | int intel_hdcp_enable(struct intel_connector *connector) | |
1850 | { | |
d3dacc70 | 1851 | struct intel_hdcp *hdcp = &connector->hdcp; |
22ce2d94 | 1852 | unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; |
49a630b0 | 1853 | int ret = -EINVAL; |
ee5e5e7a | 1854 | |
d3dacc70 | 1855 | if (!hdcp->shim) |
ee5e5e7a SP |
1856 | return -ENOENT; |
1857 | ||
d3dacc70 | 1858 | mutex_lock(&hdcp->mutex); |
49a630b0 | 1859 | WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); |
ee5e5e7a | 1860 | |
49a630b0 R |
1861 | /* |
1862 | * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup | |
1863 | * is capable of HDCP2.2, it is preferred to use HDCP2.2. | |
1864 | */ | |
22ce2d94 | 1865 | if (intel_hdcp2_capable(connector)) { |
49a630b0 | 1866 | ret = _intel_hdcp2_enable(connector); |
22ce2d94 R |
1867 | if (!ret) |
1868 | check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; | |
1869 | } | |
49a630b0 R |
1870 | |
1871 | /* When HDCP2.2 fails, HDCP1.4 will be attempted */ | |
1872 | if (ret && intel_hdcp_capable(connector)) { | |
1873 | ret = _intel_hdcp_enable(connector); | |
49a630b0 R |
1874 | } |
1875 | ||
1876 | if (!ret) { | |
22ce2d94 | 1877 | schedule_delayed_work(&hdcp->check_work, check_link_interval); |
49a630b0 R |
1878 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; |
1879 | schedule_work(&hdcp->prop_work); | |
1880 | } | |
ee5e5e7a | 1881 | |
d3dacc70 | 1882 | mutex_unlock(&hdcp->mutex); |
ee5e5e7a SP |
1883 | return ret; |
1884 | } | |
1885 | ||
/*
 * Disable content protection on the connector. Tears down whichever HDCP
 * version is currently encrypting and marks the property UNDESIRED.
 * Returns 0 (also when nothing was enabled), -ENOENT with no shim, or the
 * disable path's error code.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		/* Only one of the two encryption states can be active. */
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	/*
	 * NOTE(review): the sync cancel runs outside hdcp->mutex -
	 * presumably because the check worker's paths take that mutex and
	 * cancelling under it could deadlock; confirm against the link-check
	 * implementations.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
1908 | ||
9055aac7 R |
1909 | void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) |
1910 | { | |
1911 | mutex_lock(&dev_priv->hdcp_comp_mutex); | |
1912 | if (!dev_priv->hdcp_comp_added) { | |
1913 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1914 | return; | |
1915 | } | |
1916 | ||
1917 | dev_priv->hdcp_comp_added = false; | |
1918 | mutex_unlock(&dev_priv->hdcp_comp_mutex); | |
1919 | ||
1920 | component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); | |
1921 | } | |
1922 | ||
1923 | void intel_hdcp_cleanup(struct intel_connector *connector) | |
1924 | { | |
1925 | if (!connector->hdcp.shim) | |
1926 | return; | |
1927 | ||
1928 | mutex_lock(&connector->hdcp.mutex); | |
1929 | kfree(connector->hdcp.port_data.streams); | |
1930 | mutex_unlock(&connector->hdcp.mutex); | |
1931 | } | |
1932 | ||
ee5e5e7a SP |
1933 | void intel_hdcp_atomic_check(struct drm_connector *connector, |
1934 | struct drm_connector_state *old_state, | |
1935 | struct drm_connector_state *new_state) | |
1936 | { | |
739f3abd JN |
1937 | u64 old_cp = old_state->content_protection; |
1938 | u64 new_cp = new_state->content_protection; | |
ee5e5e7a SP |
1939 | struct drm_crtc_state *crtc_state; |
1940 | ||
1941 | if (!new_state->crtc) { | |
1942 | /* | |
1943 | * If the connector is being disabled with CP enabled, mark it | |
1944 | * desired so it's re-enabled when the connector is brought back | |
1945 | */ | |
1946 | if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) | |
1947 | new_state->content_protection = | |
1948 | DRM_MODE_CONTENT_PROTECTION_DESIRED; | |
1949 | return; | |
1950 | } | |
1951 | ||
1952 | /* | |
1953 | * Nothing to do if the state didn't change, or HDCP was activated since | |
1954 | * the last commit | |
1955 | */ | |
1956 | if (old_cp == new_cp || | |
1957 | (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && | |
1958 | new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) | |
1959 | return; | |
1960 | ||
1961 | crtc_state = drm_atomic_get_new_crtc_state(new_state->state, | |
1962 | new_state->crtc); | |
1963 | crtc_state->mode_changed = true; | |
1964 | } | |
09d56393 R |
1965 | |
/* Handles the CP_IRQ raised from the DP HDCP sink */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * Bump the CP_IRQ counter before waking waiters: sleepers compare
	 * the count to detect that an interrupt arrived while they slept.
	 */
	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	/* Kick an immediate link re-check in response to the sink's IRQ. */
	schedule_delayed_work(&hdcp->check_work, 0);
}