drm/i915: extract intel_sdvo.h from intel_drv.h
[linux-block.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
2d1a8a48 28#include <linux/export.h>
331c201a 29#include <linux/i2c.h>
01527b31
CT
30#include <linux/notifier.h>
31#include <linux/reboot.h>
331c201a
JN
32#include <linux/slab.h>
33#include <linux/types.h>
611032bf 34#include <asm/byteorder.h>
331c201a 35
c6f95f27 36#include <drm/drm_atomic_helper.h>
760285e7 37#include <drm/drm_crtc.h>
20f24d77 38#include <drm/drm_dp_helper.h>
760285e7 39#include <drm/drm_edid.h>
20f24d77 40#include <drm/drm_hdcp.h>
fcd70cd3 41#include <drm/drm_probe_helper.h>
760285e7 42#include <drm/i915_drm.h>
331c201a 43
a4fc5ed6 44#include "i915_drv.h"
331c201a 45#include "intel_audio.h"
ec7f29ff 46#include "intel_connector.h"
fdc24cf3 47#include "intel_ddi.h"
331c201a 48#include "intel_drv.h"
f3e18947 49#include "intel_lspcon.h"
55367a27 50#include "intel_psr.h"
a4fc5ed6 51
/* Max bytes of the ESI (event status indicator) field read for MST sinks */
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
/* Min/max bits-per-component our DSC implementation supports */
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
72
/* Fixed DPLL settings for a given DP link clock (kHz). */
struct dp_link_dpll {
	int clock;		/* link clock in kHz this entry applies to */
	struct dpll dpll;	/* divider values to program */
};

/* G4x: RBR (1.62 GHz) and HBR (2.7 GHz) DPLL dividers */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (IBX/CPT) DPLL dividers */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL dividers */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
122
cfcb0fc9 123/**
1853a9da 124 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
cfcb0fc9
JB
125 * @intel_dp: DP struct
126 *
127 * If a CPU or PCH DP output is attached to an eDP panel, this function
128 * will return true, and false otherwise.
129 */
1853a9da 130bool intel_dp_is_edp(struct intel_dp *intel_dp)
cfcb0fc9 131{
da63a9f2
PZ
132 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
133
134 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
135}
136
df0e9248
CW
137static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
138{
fa90ecef 139 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
140}
141
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
a4fc5ed6 151
68f357cb
JN
152/* update sink rates from dpcd */
153static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
154{
229675d5 155 static const int dp_rates[] = {
c71b53cc 156 162000, 270000, 540000, 810000
229675d5 157 };
a8a08886 158 int i, max_rate;
68f357cb 159
a8a08886 160 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
68f357cb 161
229675d5
JN
162 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
163 if (dp_rates[i] > max_rate)
a8a08886 164 break;
229675d5 165 intel_dp->sink_rates[i] = dp_rates[i];
a8a08886 166 }
68f357cb 167
a8a08886 168 intel_dp->num_sink_rates = i;
68f357cb
JN
169}
170
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* rates[] is sorted ascending; scan down from the top. */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
184
185/* Get length of common rates array potentially limited by max_rate. */
186static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
187 int max_rate)
188{
189 return intel_dp_rate_limit_len(intel_dp->common_rates,
190 intel_dp->num_common_rates, max_rate);
191}
192
540b0b7f
JN
193/* Theoretical max between source and sink */
194static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
a4fc5ed6 195{
540b0b7f 196 return intel_dp->common_rates[intel_dp->num_common_rates - 1];
a4fc5ed6
KP
197}
198
/*
 * Read the Type-C FIA lane assignment for this port and translate it to the
 * number of DP lanes available. Ports that are not Type-C (or not in Type-C
 * alternate mode) always have all 4 lanes available.
 */
static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	/* lane_info is a bitmask of assigned lanes; map it to a lane count. */
	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through - treat unknown assignments as a single lane */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}
228
540b0b7f
JN
229/* Theoretical max between source and sink */
230static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
eeb6324d
PZ
231{
232 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
540b0b7f
JN
233 int source_max = intel_dig_port->max_lanes;
234 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
db7295c2 235 int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
eeb6324d 236
db7295c2 237 return min3(source_max, sink_max, fia_max);
eeb6324d
PZ
238}
239
/*
 * Current max lane count; starts at the common max and may be reduced by
 * intel_dp_get_link_train_fallback_values() after link training failures.
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
244
/*
 * Link bandwidth required by a mode, in kBytes/s, rounded up so that a
 * fractional requirement still demands the next whole unit.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
251
/*
 * Max data rate of a link, in kBytes/s.
 *
 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
 * link rate that is generally expressed in Gbps. Since, 8 bits of data
 * is transmitted every LS_Clk per lane, there is no need to account for
 * the channel encoding that is done in the PHY layer here.
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int rate = max_link_clock * max_lanes;

	return rate;
}
263
70ec0645
MK
264static int
265intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
266{
267 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
268 struct intel_encoder *encoder = &intel_dig_port->base;
269 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
270 int max_dotclk = dev_priv->max_dotclk_freq;
271 int ds_max_dotclk;
272
273 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
274
275 if (type != DP_DS_PORT_TYPE_VGA)
276 return max_dotclk;
277
278 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
279 intel_dp->downstream_ports);
280
281 if (ds_max_dotclk != 0)
282 max_dotclk = min(max_dotclk, ds_max_dotclk);
283
284 return max_dotclk;
285}
286
/* Max source link rate (kHz) on Cannon Lake, depending on SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
309
/*
 * Max source link rate (kHz) on Ice Lake: combo PHY ports driving DP (not
 * eDP) are limited to 5.4G; all other cases get 8.1G.
 */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	if (intel_port_is_combophy(dev_priv, port) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
322
/*
 * Initialize source_rates[]/num_source_rates from the platform's rate table,
 * clamped by the platform-specific max rate (CNL/ICL) and the VBT limit.
 * Must only run once per intel_dp (see the WARN_ON below).
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table for the platform, newest platforms first. */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform limit with the VBT limit, if either exists. */
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
385
/*
 * Merge-intersect two ascending rate arrays into common_rates; returns the
 * number of common entries written (capped at DP_MAX_SUPPORTED_RATES).
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	/* Classic two-pointer walk over both sorted arrays. */
	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
408
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
420
/* Recompute common_rates[] as the intersection of source and sink rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	/* Both rate arrays must already be populated. */
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		/* Fall back to the mandatory RBR rate. */
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
437
1a92c70e 438static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
830de422 439 u8 lane_count)
14c562c0
MN
440{
441 /*
442 * FIXME: we need to synchronize the current link parameters with
443 * hardware readout. Currently fast link training doesn't work on
444 * boot-up.
445 */
1a92c70e
MN
446 if (link_rate == 0 ||
447 link_rate > intel_dp->max_link_rate)
14c562c0
MN
448 return false;
449
1a92c70e
MN
450 if (lane_count == 0 ||
451 lane_count > intel_dp_max_lane_count(intel_dp))
14c562c0
MN
452 return false;
453
454 return true;
455}
456
1e712535
MN
457static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
458 int link_rate,
830de422 459 u8 lane_count)
1e712535
MN
460{
461 const struct drm_display_mode *fixed_mode =
462 intel_dp->attached_connector->panel.fixed_mode;
463 int mode_rate, max_rate;
464
465 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
466 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
467 if (mode_rate > max_rate)
468 return false;
469
470 return true;
471}
472
/*
 * After a link training failure, lower max_link_rate to the next common rate
 * below @link_rate; once at the lowest rate, halve the lane count instead.
 * For eDP, a fallback that can no longer drive the fixed mode is refused and
 * training is retried with unchanged parameters. Returns 0 on success, -1
 * when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Try the next lower common rate at the same lane count. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lane count instead. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
508
/*
 * Connector ->mode_valid() hook: reject modes that exceed the panel size,
 * the link bandwidth (unless DSC can compress them), the downstream
 * dotclock limit, or other hardware constraints.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels can't exceed their fixed mode's dimensions. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* Too much bandwidth is OK only if DSC can be used to compress. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
581
830de422 582u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
a4fc5ed6 583{
830de422
JN
584 int i;
585 u32 v = 0;
a4fc5ed6
KP
586
587 if (src_bytes > 4)
588 src_bytes = 4;
589 for (i = 0; i < src_bytes; i++)
830de422 590 v |= ((u32)src[i]) << ((3 - i) * 8);
a4fc5ed6
KP
591 return v;
592}
593
830de422 594static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
a4fc5ed6
KP
595{
596 int i;
597 if (dst_bytes > 4)
598 dst_bytes = 4;
599 for (i = 0; i < dst_bytes; i++)
600 dst[i] = src >> ((3-i) * 8);
601}
602
/* Panel power sequencer (PPS) helpers defined later in this file. */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
/*
 * Take a wakeref on the AUX power domain and then pps_mutex. The returned
 * wakeref must be handed back via pps_unlock(); use with_pps_lock() rather
 * than calling these directly.
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
628
/*
 * Drop pps_mutex and release the wakeref taken by pps_lock(). Always
 * returns 0 so the with_pps_lock() for-loop terminates.
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
640
/*
 * Run the loop body once with the PPS lock held: wf receives the wakeref
 * from pps_lock() and is cleared to 0 by pps_unlock(), ending the loop.
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
643
/*
 * Force the just-assigned power sequencer to lock onto this port by briefly
 * enabling and disabling the DP port (with the pipe's DPLL temporarily
 * forced on if needed). Without this, even the VDD force bit is ineffective.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL force-on (and CHV common-lane override). */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
716
/*
 * Find a pipe (A or B) whose power sequencer is not claimed by any DP
 * encoder; returns INVALID_PIPE when both are taken.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
748
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning a
 * free one (stealing if necessary) and initializing it on first use.
 * Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
795
/*
 * Return the PPS instance index for this eDP port (the VBT backlight
 * controller), reprogramming the PPS registers once after a reset.
 * Caller must hold pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
820
/* Predicate used by vlv_initial_pps_pipe() to qualify a candidate pipe. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Panel power is currently on for this pipe's power sequencer. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* VDD force is currently asserted for this pipe's power sequencer. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe, used as a last-resort fallback. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
bf13e81b 841
/*
 * Scan pipes A/B for a power sequencer whose port-select matches @port and
 * that satisfies @pipe_check; returns INVALID_PIPE if none qualifies.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
864
/*
 * At init, adopt the power sequencer the BIOS left configured for this
 * port, preferring (in order) one with panel power on, one with VDD on,
 * then any matching one. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
900
/*
 * Invalidate the PPS state of all eDP encoders after a power-down so it
 * gets re-initialized on next use (BXT reprograms registers, VLV/CHV
 * re-picks the pps pipe).
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
933
/* MMIO register set describing one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor/cycle delay; INVALID_MMIO_REG when absent */
};
941
/*
 * Fill @regs with the PPS register instance used by @intel_dp.
 * The instance index is platform dependent: gen9 LP uses a numbered
 * sequencer, VLV/CHV bind a sequencer per pipe, and everything else
 * has a single instance (index 0).
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
966
f0f59a00
VS
967static i915_reg_t
968_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b 969{
8e8232d5 970 struct pps_registers regs;
bf13e81b 971
46bd8383 972 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
973
974 return regs.pp_ctrl;
bf13e81b
JN
975}
976
f0f59a00
VS
977static i915_reg_t
978_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b 979{
8e8232d5 980 struct pps_registers regs;
bf13e81b 981
46bd8383 982 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
983
984 return regs.pp_stat;
bf13e81b
JN
985}
986
/*
 * Reboot notifier: shut down panel power on SYS_RESTART so the eDP
 * T12 (power cycle) timing is guaranteed across the reboot. Only
 * applicable when panel PM state is not to be tracked, and only acts
 * on VLV/CHV.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			/* preserve the reference divider, max out the cycle delay */
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			/* block until the panel has been off long enough */
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1020
/*
 * Return true if panel power is currently on per the PP status
 * register. Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV a port without a bound PPS pipe cannot be powered. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
1033
/*
 * Return true if VDD is currently forced on (EDP_FORCE_VDD set in the
 * PP control register). Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* On VLV/CHV a port without a bound PPS pipe cannot have VDD on. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
1046
/*
 * Sanity check before AUX transfers on eDP: warn loudly if neither
 * panel power nor forced VDD is up, since AUX to a powered-down eDP
 * panel cannot work. No-op for non-eDP.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
1062
/*
 * Wait (up to 10ms) for the in-flight AUX transaction to finish, i.e.
 * for DP_AUX_CH_CTL_SEND_BUSY to clear. Returns the last value read
 * from the AUX control register so the caller can inspect status bits.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

	/* C re-reads ch_ctl each poll; NOTRACE avoids flooding the mmio trace */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
1084
830de422 1085static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 1086{
de25eb7f 1087 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
9ee32fea 1088
a457f54b
VS
1089 if (index)
1090 return 0;
1091
ec5b01dd
DL
1092 /*
1093 * The clock divider is based off the hrawclk, and would like to run at
a457f54b 1094 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
a4fc5ed6 1095 */
a457f54b 1096 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
1097}
1098
830de422 1099static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1100{
de25eb7f 1101 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1102 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd
DL
1103
1104 if (index)
1105 return 0;
1106
a457f54b
VS
1107 /*
1108 * The clock divider is based off the cdclk or PCH rawclk, and would
1109 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1110 * divide by 2000 and use that
1111 */
563d22a0 1112 if (dig_port->aux_ch == AUX_CH_A)
49cd97a3 1113 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
e7dc33f3
VS
1114 else
1115 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
1116}
1117
830de422 1118static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1119{
de25eb7f 1120 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1121 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd 1122
563d22a0 1123 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
2c55c336 1124 /* Workaround for non-ULT HSW */
bc86625a
CW
1125 switch (index) {
1126 case 0: return 63;
1127 case 1: return 72;
1128 default: return 0;
1129 }
2c55c336 1130 }
a457f54b
VS
1131
1132 return ilk_get_aux_clock_divider(intel_dp, index);
b84a1cf8
RV
1133}
1134
830de422 1135static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
b6b5e383
DL
1136{
1137 /*
1138 * SKL doesn't need us to program the AUX clock divider (Hardware will
1139 * derive the clock from CDCLK automatically). We still implement the
1140 * get_aux_clock_divider vfunc to plug-in into the existing code.
1141 */
1142 return index ? 0 : 1;
1143}
1144
830de422
JN
1145static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1146 int send_bytes,
1147 u32 aux_clock_divider)
5ed12a19
DL
1148{
1149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8652744b
TU
1150 struct drm_i915_private *dev_priv =
1151 to_i915(intel_dig_port->base.base.dev);
830de422 1152 u32 precharge, timeout;
5ed12a19 1153
cf819eff 1154 if (IS_GEN(dev_priv, 6))
5ed12a19
DL
1155 precharge = 3;
1156 else
1157 precharge = 5;
1158
8f5f63d5 1159 if (IS_BROADWELL(dev_priv))
5ed12a19
DL
1160 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1161 else
1162 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1163
1164 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 1165 DP_AUX_CH_CTL_DONE |
8a29c778 1166 DP_AUX_CH_CTL_INTERRUPT |
788d4433 1167 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 1168 timeout |
788d4433 1169 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
1170 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1171 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 1172 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
1173}
1174
830de422
JN
1175static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1176 int send_bytes,
1177 u32 unused)
b9ca5fad 1178{
6f211ed4 1179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
830de422 1180 u32 ret;
6f211ed4
AS
1181
1182 ret = DP_AUX_CH_CTL_SEND_BUSY |
1183 DP_AUX_CH_CTL_DONE |
1184 DP_AUX_CH_CTL_INTERRUPT |
1185 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1186 DP_AUX_CH_CTL_TIME_OUT_MAX |
1187 DP_AUX_CH_CTL_RECEIVE_ERROR |
1188 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1189 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1190 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1191
1192 if (intel_dig_port->tc_type == TC_PORT_TBT)
1193 ret |= DP_AUX_CH_CTL_TBT_IO;
1194
1195 return ret;
b9ca5fad
DL
1196}
1197
/*
 * Perform one raw AUX channel transaction: send @send_bytes from @send,
 * then unload up to @recv_size reply bytes into @recv. Takes pps_lock,
 * optionally forces VDD, and bumps PM QoS for the duration. Returns the
 * number of bytes received, or a negative error code
 * (-EBUSY/-E2BIG/-EIO/-ETIMEDOUT).
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	intel_wakeref_t wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* rate-limit the warning: only report a changed status */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer once per available AUX clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* undo in reverse order: QoS, VDD, pps lock */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, wakeref);

	return ret;
}
1369
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

/* Pack the 4-byte AUX message header from @msg into @txbuf. */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	/* byte 0: request opcode in the high nibble, address bits 19:16 below */
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	/* bytes 1-2: address bits 15:0, most significant first */
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	/* byte 3: transfer length, encoded as size - 1 */
	txbuf[3] = msg->size - 1;
}
1382
/*
 * drm_dp_aux.transfer hook: translate a drm_dp_aux_msg into a single
 * AUX transaction via intel_dp_aux_xfer() and decode the reply.
 * Returns the payload size transferred, or a negative error code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* a zero-size message sends only the bare 3-byte address */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both clear */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			/* reply code lives in the high nibble of byte 0 */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* +1 for the reply byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1453
/*
 * g4x-era AUX channel -> control register lookup (only AUX B-D exist).
 * Unknown channels warn and fall back to AUX_CH_B.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
1471
/*
 * g4x-era AUX channel -> data register lookup (@index selects one of
 * the 5 data registers). Unknown channels warn, fall back to AUX_CH_B.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
1488
/*
 * ilk-bdw AUX control register lookup: AUX A is in the CPU/display
 * range, AUX B-D are behind the PCH. Unknown channels warn and fall
 * back to AUX_CH_A.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1507
/*
 * ilk-bdw AUX data register lookup: AUX A in the CPU/display range,
 * AUX B-D behind the PCH; @index selects one of the 5 data registers.
 * Unknown channels warn and fall back to AUX_CH_A.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1526
/*
 * skl+ AUX control register lookup: channels A-F all live in the
 * display block. Unknown channels warn and fall back to AUX_CH_A.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1546
/*
 * skl+ AUX data register lookup: channels A-F, @index selects one of
 * the 5 data registers. Unknown channels warn, fall back to AUX_CH_A.
 * NOTE(review): dev_priv looks unused but appears to be referenced
 * implicitly by the register macros — confirm before removing.
 */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1566
/* Release resources allocated by intel_dp_aux_init() (the aux name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1572
1573static void
1574intel_dp_aux_init(struct intel_dp *intel_dp)
330e20ec 1575{
de25eb7f 1576 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1577 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1578 struct intel_encoder *encoder = &dig_port->base;
91e939ae 1579
4904fa66
VS
1580 if (INTEL_GEN(dev_priv) >= 9) {
1581 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1582 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1583 } else if (HAS_PCH_SPLIT(dev_priv)) {
1584 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1585 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1586 } else {
1587 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1588 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1589 }
330e20ec 1590
91e939ae
VS
1591 if (INTEL_GEN(dev_priv) >= 9)
1592 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1593 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1594 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1595 else if (HAS_PCH_SPLIT(dev_priv))
1596 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1597 else
1598 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
bdabdb63 1599
91e939ae
VS
1600 if (INTEL_GEN(dev_priv) >= 9)
1601 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1602 else
1603 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
ab2c0672 1604
7a418e34 1605 drm_dp_aux_init(&intel_dp->aux);
8316f337 1606
7a418e34 1607 /* Failure to allocate our preferred name is not critical */
bdabdb63
VS
1608 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1609 port_name(encoder->port));
9d1a1031 1610 intel_dp->aux.transfer = intel_dp_aux_transfer;
a4fc5ed6
KP
1611}
1612
e588fa18 1613bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1614{
fc603ca7 1615 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
e588fa18 1616
fc603ca7 1617 return max_rate >= 540000;
ed63baaf
TS
1618}
1619
2edd5327
MN
1620bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1621{
1622 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1623
1624 return max_rate >= 810000;
1625}
1626
/*
 * On platforms with fixed per-rate DPLL settings (g4x, PCH split,
 * CHV, VLV), look up the canned divisor for the chosen port clock and
 * preload it into the crtc state. No-op on other platforms or when no
 * table entry matches.
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		/* first matching clock entry wins */
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
1659
/*
 * Format @nelem integers from @array into @str (a buffer of @len
 * bytes) as a comma-separated list, stopping silently on truncation
 * or output error.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* guard the terminator write below against a zero-size buffer */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation. The old
		 * code compared the signed r against the unsigned len,
		 * catching r < 0 only via accidental wraparound; make
		 * both conditions explicit.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1675
/* Dump the source, sink and common link-rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	/* skip all the formatting work when KMS debugging is disabled */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1695
/*
 * Return the highest currently usable link rate (kHz): the largest
 * common source/sink rate not exceeding max_link_rate. Falls back to
 * RBR (162000) if the table is unexpectedly empty.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	/* common_rates[] is limited to len entries; the last one is the max */
	return intel_dp->common_rates[len - 1];
}
1707
ed4e9c1d
VS
1708int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1709{
8001b754
JN
1710 int i = intel_dp_rate_index(intel_dp->sink_rates,
1711 intel_dp->num_sink_rates, rate);
b5c72b20
JN
1712
1713 if (WARN_ON(i < 0))
1714 i = 0;
1715
1716 return i;
ed4e9c1d
VS
1717}
1718
94223d04 1719void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
830de422 1720 u8 *link_bw, u8 *rate_select)
04a60f9f 1721{
68f357cb
JN
1722 /* eDP 1.4 rate select method. */
1723 if (intel_dp->use_rate_select) {
04a60f9f
VS
1724 *link_bw = 0;
1725 *rate_select =
1726 intel_dp_rate_select(intel_dp, port_clock);
1727 } else {
1728 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1729 *rate_select = 0;
1730 }
1731}
1732
/*
 * Source-side FEC support: gen11+ only, and not on transcoder A.
 */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 11 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}
1741
1742static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1743 const struct intel_crtc_state *pipe_config)
1744{
1745 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1746 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1747}
1748
/*
 * Source-side DSC support: gen10+ only, and not on transcoder A.
 */
static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return INTEL_GEN(dev_priv) >= 10 &&
		pipe_config->cpu_transcoder != TRANSCODER_A;
}
1757
1758static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1759 const struct intel_crtc_state *pipe_config)
1760{
240999cf
AS
1761 if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1762 return false;
1763
a4a15777
MN
1764 return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1765 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1766}
1767
/*
 * Pick the pipe bpp for a DP link: start from the requested pipe bpp,
 * clamp to the downstream port's max bpc (from DPCD), and for eDP
 * optionally clamp further to the VBT-provided panel bpp.
 */
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	/* bpc == 0 means "no downstream limit known" */
	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
1793
/*
 * Adjust link config limits based on compliance test requests:
 * pin bpp and/or clock+lane-count to the values the DP compliance
 * test asked for, after validating them against the current limits.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* force this exact bpp by collapsing the limit range */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1829
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/* step bpp down by 6 (2 bits per colour channel) each iteration */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		/* prefer lower link clock before adding lanes */
		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					/* first fit wins: record and stop */
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* no bpp/clock/lane combination can carry the mode */
	return -EINVAL;
}
1865
7769db58 1866/* Optimize link config in order: max bpp, min lanes, min clock */
204474a6 1867static int
7769db58
JN
1868intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1869 struct intel_crtc_state *pipe_config,
1870 const struct link_config_limits *limits)
1871{
1872 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1873 int bpp, clock, lane_count;
1874 int mode_rate, link_clock, link_avail;
1875
1876 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1877 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1878 bpp);
1879
1880 for (lane_count = limits->min_lane_count;
1881 lane_count <= limits->max_lane_count;
1882 lane_count <<= 1) {
1883 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1884 link_clock = intel_dp->common_rates[clock];
1885 link_avail = intel_dp_max_data_rate(link_clock,
1886 lane_count);
1887
1888 if (mode_rate <= link_avail) {
1889 pipe_config->lane_count = lane_count;
1890 pipe_config->pipe_bpp = bpp;
1891 pipe_config->port_clock = link_clock;
1892
204474a6 1893 return 0;
7769db58
JN
1894 }
1895 }
1896 }
1897 }
1898
204474a6 1899 return -EINVAL;
7769db58
JN
1900}
1901
a4a15777
MN
1902static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1903{
1904 int i, num_bpc;
1905 u8 dsc_bpc[3] = {0};
1906
1907 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1908 dsc_bpc);
1909 for (i = 0; i < num_bpc; i++) {
1910 if (dsc_max_bpc >= dsc_bpc[i])
1911 return dsc_bpc[i] * 3;
1912 }
1913
1914 return 0;
1915}
1916
/*
 * Compute a DSC (Display Stream Compression) link configuration.
 *
 * Verifies the sink/source pair supports DSC, picks the input bpp, and
 * fills pipe_config->dsc_params (compressed bpp, slice count, dsc_split)
 * plus the link parameters (max rate/lane count from @limits).
 *
 * Returns 0 on success, -EINVAL (or the error from
 * intel_dp_compute_dsc_params()) when DSC cannot be used for this mode.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Input bpc is capped both by our HW limit and the property request. */
	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * eDP: take compressed bpp straight from the sink DPCD
		 * (value is in 1/16 bpp units, hence the >> 4).
		 */
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* External DP: derive limits from link bandwidth and mode. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	/* Fill in the rest of the DSC picture parameter set. */
	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
2011
/*
 * Compute the DP link configuration (rate, lane count, bpp) for the mode
 * in @pipe_config, falling back to DSC if the uncompressed mode does not
 * fit in the available link bandwidth (or if force_dsc_en is set).
 *
 * Returns 0 on success or a negative errno if no workable config exists.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = 6 * 3;		/* 6 bpc is the floor */
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The eDP 1.3 and earlier panels
		 * are generally designed to support only a single clock and
		 * lane configuration, and typically these values correspond to
		 * the native resolution of the panel. With eDP 1.4 rate select
		 * and DSC, this is decreasingly the case, and we need to be
		 * able to select less than maximum link config.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	/* Compliance tests may pin the limits to specific values. */
	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we use the max clock and lane count for eDP 1.3 and
		 * earlier, and fast vs. wide is irrelevant.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
							&limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							&limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}
	return 0;
}
2110
37aa52bf
VS
2111bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2112 const struct drm_connector_state *conn_state)
2113{
2114 const struct intel_digital_connector_state *intel_conn_state =
2115 to_intel_digital_connector_state(conn_state);
2116 const struct drm_display_mode *adjusted_mode =
2117 &crtc_state->base.adjusted_mode;
2118
2119 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2120 /*
2121 * See:
2122 * CEA-861-E - 5.1 Default Encoding Parameters
2123 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2124 */
2125 return crtc_state->pipe_bpp != 18 &&
2126 drm_default_rgb_quant_range(adjusted_mode) ==
2127 HDMI_QUANTIZATION_RANGE_LIMITED;
2128 } else {
2129 return intel_conn_state->broadcast_rgb ==
2130 INTEL_BROADCAST_RGB_LIMITED;
2131 }
2132}
2133
/*
 * Atomic compute hook for DP/eDP encoders. Fills @pipe_config from the
 * requested mode and connector state: PCH encoder usage, output format,
 * audio, eDP fixed-mode/panel fitting, FEC, the link configuration
 * (via intel_dp_compute_link_config()), limited color range, DP M/N
 * values (including a second set for seamless DRRS), clock dividers on
 * non-DDI platforms, and PSR.
 *
 * Returns 0 on success or a negative errno if the mode is unsupported.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* An active LSPCON adapter may convert to YCbCr 4:2:0. */
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);

	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP always scans out the panel's fixed native mode. */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	/* Reject mode flags the hardware cannot drive over DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	/* FEC is only used for (non-eDP) DP when supported. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
				  intel_dp_supports_fec(intel_dp, pipe_config);

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* M/N is based on compressed bpp when DSC is enabled. */
	if (!pipe_config->dsc_params.compression_enable)
		intel_link_compute_m_n(pipe_config->pipe_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);
	else
		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);

	/* Second M/N set for the downclocked (DRRS) refresh rate. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(pipe_config->pipe_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
2238
901c2daf 2239void intel_dp_set_link_params(struct intel_dp *intel_dp,
830de422 2240 int link_rate, u8 lane_count,
dfa10480 2241 bool link_mst)
901c2daf 2242{
edb2e530 2243 intel_dp->link_trained = false;
dfa10480
ACO
2244 intel_dp->link_rate = link_rate;
2245 intel_dp->lane_count = lane_count;
2246 intel_dp->link_mst = link_mst;
901c2daf
VS
2247}
2248
/*
 * Precompute the DP port register value (cached in intel_dp->DP) for the
 * upcoming enable, preserving the BIOS DP_DETECTED bit. On CPT PCH ports
 * the enhanced-framing bit lives in TRANS_DP_CTL and is written here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity + pipe select in DP reg. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH port: enhanced framing lives in TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU style register layout. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2332
/*
 * Mask/value pairs describing panel power sequencer states, used with
 * wait_panel_status() below to wait for the sequencer to settle.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

46bd8383 2342static void intel_pps_verify_state(struct intel_dp *intel_dp);
de9c1b6b 2343
/*
 * Poll PP_STATUS until (status & mask) == value, with a 5 second timeout.
 * Logs an error (but does not fail) on timeout. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(&dev_priv->uncore,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 2372
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
2378
/* Wait for the power sequencer to report the panel off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2384
/*
 * Enforce the panel power cycle delay (t11_t12) since the last power off
 * before the sequencer is allowed to run again.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2405
/* Honour the panel's power-on to backlight-on delay (t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2411
/* Honour the backlight-off delay (t9) after disabling the backlight. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 2417
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/*
	 * On non-DDI platforms the register is expected to be unlocked
	 * already; WARN and force the unlock key in if it is not.
	 */
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
2437
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Returns true if VDD was not already requested, i.e. the caller is the
 * one responsible for eventually disabling it again.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	/* Hold off any pending deferred VDD-off while we want VDD on. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Balanced by the power_put in edp_panel_vdd_off_sync(). */
	intel_display_power_get(dev_priv,
				intel_aux_power_domain(intel_dig_port));

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->base.port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->base.port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2492
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	/* This path must be the first/only VDD reference, hence the WARN. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->base.port));
}
2514
/*
 * Immediately turn the VDD override off (if nobody wants it anymore) and
 * drop the AUX power domain reference taken by edp_panel_vdd_on().
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* VDD was the only thing keeping the panel "on": start the
	 * power-cycle clock now. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(intel_dig_port));
}
5d613501 2552
/*
 * Deferred-work handler that turns VDD off, unless someone requested it
 * back on in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		/* want_panel_vdd may have been re-set since scheduling. */
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}
2565
/* Schedule the deferred VDD-off work (edp_panel_vdd_work). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2578
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: turn VDD off immediately instead of via the deferred work.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->base.port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2603
/*
 * Run the panel power-on sequence via PP_CONTROL and wait for completion.
 * Caller must hold pps_mutex; use intel_edp_panel_on() otherwise.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Starting point for the backlight-on delay (see wait_backlight_on). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 2650
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
2661
/*
 * Run the panel power-off sequence (clearing panel power, reset, VDD
 * override and backlight enable together), wait for completion, and drop
 * the AUX power reference taken when VDD was enabled.
 * Caller must hold pps_mutex; use intel_edp_panel_off() otherwise.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dig_port->base.port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Starting point for the t11_t12 power-cycle delay. */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
e39b999a 2700
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
2711
1250d107
JN
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Set the backlight-enable bit in the PPS control register. */
		pp = ironlake_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2737
1250d107 2738/* Enable backlight PWM and backlight PP control. */
b037d58f
ML
2739void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2740 const struct drm_connector_state *conn_state)
1250d107 2741{
b037d58f
ML
2742 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2743
1853a9da 2744 if (!intel_dp_is_edp(intel_dp))
1250d107
JN
2745 return;
2746
2747 DRM_DEBUG_KMS("\n");
2748
b037d58f 2749 intel_panel_enable_backlight(crtc_state, conn_state);
1250d107
JN
2750 _intel_edp_backlight_on(intel_dp);
2751}
2752
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Clear the backlight-enable bit in the PPS control register. */
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	/*
	 * Record when the backlight went off and wait out the mandated
	 * backlight-off delay before returning.
	 */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2776
1250d107 2777/* Disable backlight PP control and backlight PWM. */
b037d58f 2778void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
1250d107 2779{
b037d58f
ML
2780 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2781
1853a9da 2782 if (!intel_dp_is_edp(intel_dp))
1250d107
JN
2783 return;
2784
2785 DRM_DEBUG_KMS("\n");
f7d2323c 2786
1250d107 2787 _intel_edp_backlight_off(intel_dp);
b037d58f 2788 intel_panel_disable_backlight(old_conn_state);
32f9d658 2789}
a4fc5ed6 2790
73580fb7
JN
2791/*
2792 * Hook for controlling the panel power control backlight through the bl_power
2793 * sysfs attribute. Take care to handle multiple calls.
2794 */
2795static void intel_edp_backlight_power(struct intel_connector *connector,
2796 bool enable)
2797{
2798 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
69d93820 2799 intel_wakeref_t wakeref;
e39b999a
VS
2800 bool is_enabled;
2801
69d93820
CW
2802 is_enabled = false;
2803 with_pps_lock(intel_dp, wakeref)
2804 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
73580fb7
JN
2805 if (is_enabled == enable)
2806 return;
2807
23ba9373
JN
2808 DRM_DEBUG_KMS("panel power control backlight %s\n",
2809 enable ? "enable" : "disable");
73580fb7
JN
2810
2811 if (enable)
2812 _intel_edp_backlight_on(intel_dp);
2813 else
2814 _intel_edp_backlight_off(intel_dp);
2815}
2816
64e1077a
VS
/*
 * State assertion: warn if the DP port enable bit in the output register
 * does not match the expected @state.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2829
/* State assertion: warn if the eDP PLL enable bit does not match @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2840
/*
 * Enable the eDP PLL on port A. Must be called with the pipe, the DP
 * port and the PLL itself all disabled (asserted below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Program the PLL frequency to match the link rate (162/270 MHz). */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	/* Second write actually enables the PLL. */
	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2880
adc10304
VS
/*
 * Disable the eDP PLL on port A. Must be called with the pipe and the
 * DP port already disabled, and the PLL still enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2899
857c416e
VS
2900static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2901{
2902 /*
2903 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2904 * be capable of signalling downstream hpd with a long pulse.
2905 * Whether or not that means D3 is safe to use is not clear,
2906 * but let's assume so until proven otherwise.
2907 *
2908 * FIXME should really check all downstream ports...
2909 */
2910 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2911 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2912 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2913}
2914
2279298d
GS
2915void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2916 const struct intel_crtc_state *crtc_state,
2917 bool enable)
2918{
2919 int ret;
2920
2921 if (!crtc_state->dsc_params.compression_enable)
2922 return;
2923
2924 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2925 enable ? DP_DECOMPRESSION_EN : 0);
2926 if (ret < 0)
2927 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
2928 enable ? "enable" : "disable");
2929}
2930
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if we depend on it for downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* Active LSPCON adapters must settle back into PCON mode. */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* ret == 1 means exactly one byte was written successfully. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2969
59b74c49
VS
2970static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2971 enum port port, enum pipe *pipe)
2972{
2973 enum pipe p;
2974
2975 for_each_pipe(dev_priv, p) {
2976 u32 val = I915_READ(TRANS_DP_CTL(p));
2977
2978 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2979 *pipe = p;
2980 return true;
2981 }
2982 }
2983
2984 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
2985
2986 /* must initialize pipe to something for the asserts */
2987 *pipe = PIPE_A;
2988
2989 return false;
2990}
2991
2992bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2993 i915_reg_t dp_reg, enum port port,
2994 enum pipe *pipe)
2995{
2996 bool ret;
2997 u32 val;
2998
2999 val = I915_READ(dp_reg);
3000
3001 ret = val & DP_PORT_EN;
3002
3003 /* asserts want to know the pipe even if the port is disabled */
3004 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3005 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3006 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3007 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3008 else if (IS_CHERRYVIEW(dev_priv))
3009 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3010 else
3011 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3012
3013 return ret;
3014}
3015
19d8fe15
DV
/*
 * Encoder ->get_hw_state hook: read whether the DP port is enabled and
 * which pipe it drives. Grabs a display power reference for the read and
 * releases it before returning; bails out if the power domain is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}
d240f20f 3036
/*
 * Encoder ->get_config hook: read the current hardware state of the DP
 * port back into @pipe_config (output type, audio, sync polarity flags,
 * color range, lane count, m/n values, port clock and dotclock).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT (non port A) the sync polarity lives in TRANS_DP_CTL. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	/* Lane count is stored as (count - 1) in the port register. */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A derives its link clock from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3120
/*
 * Common DP disable: audio off first, then backlight, sink power state
 * and panel power — with VDD forced on across the panel shutdown.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Any previous link training result is stale once we disable. */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3140
/* g4x ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3147
/* VLV ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3154
/* g4x ->post_disable hook: take the link down and drop the eDP PLL. */
static void g4x_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
3174
/* VLV ->post_disable hook: take the DP link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3181
/*
 * CHV ->post_disable hook: take the link down, then soft-reset the PHY
 * data lanes under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	mutex_unlock(&dev_priv->sb_lock);
}
3197
7b13b58a
VS
/*
 * Translate a DP training pattern request into the platform-specific
 * register encoding. On DDI platforms the pattern is written directly to
 * DP_TP_CTL; on older platforms only the bits in *DP are updated and the
 * caller is responsible for writing the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 u32 *DP,
			 u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		u32 temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		/* CPT-style encoding; TPS3 is not supported here. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy encoding; TPS3 is not supported here either. */
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}
3281
/*
 * Program the port register and set DP_PORT_EN, using training pattern 1
 * as the initial link state.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3304
/*
 * Common DP enable sequence: program and enable the port, run the panel
 * power-on dance under the pps lock, wait for the VLV/CHV PHY, wake the
 * sink, train the link and finally enable audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	u32 dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		/* VLV/CHV need their power sequencer bound to this pipe first. */
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* Panel power on, with VDD forced only for the duration. */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
89b667f8 3350
/* g4x ->enable hook: common DP enable, then the eDP backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
89b667f8 3358
/*
 * VLV ->enable hook: the port was already brought up in ->pre_enable,
 * so only the eDP backlight remains to be turned on here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
3365
fd6bbda9 3366static void g4x_pre_enable_dp(struct intel_encoder *encoder,
5f88a9c6
VS
3367 const struct intel_crtc_state *pipe_config,
3368 const struct drm_connector_state *conn_state)
ab1f90f9
JN
3369{
3370 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
8f4f2797 3371 enum port port = encoder->port;
ab1f90f9 3372
85cb48a1 3373 intel_dp_prepare(encoder, pipe_config);
8ac33ed3 3374
d41f1efb 3375 /* Only ilk+ has port A */
abfce949 3376 if (port == PORT_A)
85cb48a1 3377 ironlake_edp_pll_on(intel_dp, pipe_config);
ab1f90f9
JN
3378}
3379
83b84597
VS
/*
 * Logically disconnect this port from its VLV/CHV power sequencer:
 * sync off any pending VDD, clear the PPS port select and mark
 * pps_pipe invalid. Caller context: the port must not be active.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipes A and B have a power sequencer on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3410
/*
 * Detach @pipe's power sequencer from whichever DP port currently has it
 * selected, so that it can be reassigned. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
		enum port port = encoder->port;

		/* Stealing from a still-active port indicates a driver bug. */
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3436
adc10304
VS
/*
 * Bind the power sequencer of the crtc's pipe to this port, stealing it
 * from any other port if needed, and (for eDP) program the PPS registers.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Only eDP actually uses the panel power sequencer beyond this. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3479
/* VLV ->pre_enable hook: PHY setup, then the common DP enable sequence. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);
	intel_enable_dp(encoder, pipe_config, conn_state);
}
3488
/* VLV ->pre_pll_enable hook: program the port, then prep the PHY PLL. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);
	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3497
/* CHV ->pre_enable hook: PHY setup, common DP enable, then release CL2. */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3509
/* CHV ->pre_pll_enable hook: program the port, then prep the PHY PLL. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);
	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3518
/* CHV ->post_pll_disable hook: tear down the PHY PLL state. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3525
a4fc5ed6
KP
3526/*
3527 * Fetch AUX CH registers 0x202 - 0x207 which contain
3528 * link status information
3529 */
94223d04 3530bool
830de422 3531intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3532{
9f085ebb
L
3533 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3534 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3535}
3536
/* These are source-specific values. */
/*
 * Maximum voltage swing level the source supports for this port.
 * Order of the checks matters: DDI platforms delegate to the DDI code,
 * then VLV/CHV, IVB port A and CPT each get their own cap.
 */
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv))
		return intel_ddi_dp_voltage_max(encoder);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3556
830de422
JN
/*
 * Maximum pre-emphasis level the source supports for the given voltage
 * swing. Platform tables mirror intel_dp_voltage_max() above.
 */
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3602
/*
 * Program the VLV PHY for the requested swing/pre-emphasis combination.
 *
 * Maps the first lane's training value to the de-emphasis, pre-emphasis
 * and unique-transition-scale PHY register values and writes them via
 * vlv_set_phy_signal_level(). The magic constants are the tuned PHY
 * settings for each valid swing/pre-emphasis pair; combinations outside
 * the DP-allowed set fall through to "return 0" without touching the PHY.
 *
 * Always returns 0: on VLV nothing is merged into the DP port register
 * by the caller (mask == 0 in intel_dp_set_signal_levels()).
 */
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3688
/*
 * Program the CHV PHY for the requested swing/pre-emphasis combination.
 *
 * Looks up the tuned de-emphasis/margin register values for the first
 * lane's training value and writes them via chv_set_phy_signal_level().
 * The unique-transition-scale tweak is only enabled for the single
 * highest-swing/no-pre-emphasis entry. Invalid combinations return 0
 * without touching the PHY.
 *
 * Always returns 0: the caller merges nothing into the DP port register
 * on CHV (mask == 0 in intel_dp_set_signal_levels()).
 */
static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3771
830de422
JN
3772static u32
3773g4x_signal_levels(u8 train_set)
a4fc5ed6 3774{
830de422 3775 u32 signal_levels = 0;
a4fc5ed6 3776
3cf2efb1 3777 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3778 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3779 default:
3780 signal_levels |= DP_VOLTAGE_0_4;
3781 break;
bd60018a 3782 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3783 signal_levels |= DP_VOLTAGE_0_6;
3784 break;
bd60018a 3785 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3786 signal_levels |= DP_VOLTAGE_0_8;
3787 break;
bd60018a 3788 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3789 signal_levels |= DP_VOLTAGE_1_2;
3790 break;
3791 }
3cf2efb1 3792 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3793 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3794 default:
3795 signal_levels |= DP_PRE_EMPHASIS_0;
3796 break;
bd60018a 3797 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3798 signal_levels |= DP_PRE_EMPHASIS_3_5;
3799 break;
bd60018a 3800 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3801 signal_levels |= DP_PRE_EMPHASIS_6;
3802 break;
bd60018a 3803 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3804 signal_levels |= DP_PRE_EMPHASIS_9_5;
3805 break;
3806 }
3807 return signal_levels;
3808}
3809
/* SNB CPU eDP voltage swing and pre-emphasis control */
/*
 * SNB's CPU eDP port only supports a subset of swing/pre-emphasis
 * combinations; several DPCD pairs share one register encoding.
 * Unsupported requests log a debug message and fall back to the
 * lowest setting.
 */
static u32
snb_cpu_edp_signal_levels(u8 train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3837
/* IVB CPU eDP voltage swing and pre-emphasis control */
/*
 * IVB's CPU eDP register encodes each supported swing/pre-emphasis
 * pair individually; unsupported combinations log a debug message and
 * fall back to 500mV/0dB.
 */
static u32
ivb_cpu_edp_signal_levels(u8 train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3868
/*
 * Apply the current train_set[0] swing/pre-emphasis to the hardware.
 *
 * Dispatches to the per-platform helper; helpers that program the PHY
 * directly (BXT/GLK+, VLV, CHV) leave mask == 0, while the others return
 * bits that must be merged into the DP port register under 'mask'.
 * The final value is cached in intel_dp->DP, written to output_reg and
 * posted with a read-back.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 signal_levels, mask = 0;
	u8 train_set = intel_dp->train_set[0];

	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		signal_levels = ivb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
		signal_levels = snb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = g4x_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only meaningful when levels actually land in the port register */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		      train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		      (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3912
/*
 * Select the requested link-training pattern in the cached DP register
 * value (via the platform-specific _intel_dp_set_link_train()) and
 * flush it to the port register, posting with a read-back.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3926
/*
 * Switch a DDI port's DP_TP_CTL into idle-pattern transmission and,
 * for non-eDP ports, wait until the hardware reports the idle pattern
 * has been sent. No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 val;

	if (!HAS_DDI(dev_priv))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout for DP_TP_STATUS_IDLE_DONE to assert */
	if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
				    DP_TP_STATUS_IDLE_DONE,
				    DP_TP_STATUS_IDLE_DONE,
				    1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3958
/*
 * Bring the DP link down on pre-DDI hardware.
 *
 * Sequence: put the port into the idle training pattern, then clear
 * DP_PORT_EN and audio enable, apply the IBX transcoder-A workaround if
 * needed, wait the panel power-down delay, and cache the final register
 * value in intel_dp->DP. WARNs and bails if called on DDI hardware or
 * if the port is already disabled. Statement order here mirrors the
 * required hardware sequence — do not reorder.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop to the idle pattern (register layout differs on CPT) */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually disable the port (and audio) */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4032
/*
 * If the sink advertises the extended receiver capability field
 * (DPCD 0x2200), read it and overwrite the first bytes of the cached
 * base DPCD with it, since 0x0000 may under-report the sink's true
 * capabilities on such devices.
 */
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	u8 dpcd_ext[6];

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * if it is set DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		DRM_ERROR("DPCD failed read at extended capabilities\n");
		return;
	}

	/* A lower extended rev than the base rev indicates a broken sink */
	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	/* Nothing to do if both copies already agree */
	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	DRM_DEBUG_KMS("Base DPCD: %*ph\n",
		      (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
4068
/*
 * Read the sink's base DPCD (starting at address 0x000) into
 * intel_dp->dpcd, folding in the extended receiver capability field
 * when present. Returns false if the AUX transfer failed or the sink
 * reports DPCD revision 0 (i.e. no usable DPCD).
 */
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
edb39244 4082
/*
 * Cache the sink's DSC capability registers (and FEC capability for
 * external DP) in intel_dp. Caches are cleared first so sinks without
 * DSC support never leave stale values behind. Only sinks reporting
 * DPCD >= 1.4 (or eDP >= 1.4) are queried.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
	}
}
4116
/*
 * One-time DPCD initialization for eDP panels: reads the base DPCD,
 * the branch/device descriptor, the eDP display-control registers, the
 * eDP 1.4+ supported link-rate table, and (on GLK/gen10+) the DSC caps.
 * Also initializes PSR state, which depends on edp_dpcd being filled.
 * Returns false if the base DPCD could not be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		/* Table is zero-terminated; entries are little-endian u16 */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4199
4200
/*
 * Re-read sink capabilities on (re)detection or short-pulse HPD.
 * Refreshes the cached DPCD, the link-rate tables (DP only — eDP rates
 * are set once at init and must not be clobbered), the sink count, and
 * the downstream-port info for branch devices. Returns false when the
 * sink is gone or unreadable.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why it is not read here (nor in intel_edp_init_dpcd())
	 * for eDP.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4256
0e32b39c 4257static bool
9dbf5a4e 4258intel_dp_sink_can_mst(struct intel_dp *intel_dp)
0e32b39c 4259{
010b9b39 4260 u8 mstm_cap;
0e32b39c 4261
0e32b39c
DA
4262 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4263 return false;
4264
010b9b39 4265 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
c4e3170a 4266 return false;
0e32b39c 4267
010b9b39 4268 return mstm_cap & DP_MST_CAP;
c4e3170a
VS
4269}
4270
/*
 * MST is used only when all three agree: the module parameter enables
 * it, the source port supports it, and the sink advertises it.
 */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	return i915_modparams.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}
4278
c4e3170a
VS
/*
 * Decide whether this port should run in MST mode and tell the MST
 * topology manager. Logs the three inputs to the decision (source
 * support, sink support, module parameter); bails early if the source
 * port cannot do MST at all.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
		      port_name(encoder->port), yesno(intel_dp->can_mst),
		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
4299
0e32b39c
DA
/*
 * Read the DP_SINK_COUNT_ESI block (MST event/status indicators) into
 * sink_irq_vector. Caller must provide at least DP_DPRX_ESI_LEN bytes.
 * Returns true only if the full block was transferred.
 */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}
4307
/*
 * Compute the largest valid DSC compressed bpp for the given link
 * (clock in kHz, lane count) and mode (pixel clock, horizontal width).
 * Takes the smaller of the link-bandwidth limit and the small-joiner
 * RAM limit, then rounds down to the nearest VESA-valid bpp.
 * Returns the bpp in U6.4 fixed point (integer bpp << 4), or 0 if even
 * the smallest valid bpp does not fit.
 */
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
				int mode_clock, int mode_hdisplay)
{
	u16 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
	 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8 *
			  DP_DSC_FEC_OVERHEAD_FACTOR) /
		mode_clock;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
		mode_hdisplay;

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
4353
/*
 * Pick the smallest sink-supported DSC slice count that satisfies both
 * the encoder throughput limit (which depends on the pixel clock) and
 * the sink's maximum slice width. Returns 0 if no valid slice count
 * exists or the sink's max slice width is below the spec minimum.
 */
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				int mode_clock,
				int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	/* Higher pixel clocks allow more throughput per slice */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
4392
/*
 * Handle the DP compliance link-training autotest request (DP CTS
 * 1.2, 4.3.1.11): read the requested lane count and link rate from the
 * sink, validate them against our capabilities, and stash them in the
 * compliance state for the test handler. Returns DP_TEST_ACK or
 * DP_TEST_NAK to be written back to the sink.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4429
/*
 * Handle the DP compliance video-pattern autotest request (DP CTS
 * 3.1.5): read the requested pattern, resolution and misc parameters,
 * accept only the combinations we can generate (RGB color ramp, VESA
 * range, 6/8 bpc), record them in the compliance state and flag the
 * test as active. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB, VESA range, and 6/8 bpc are supported */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4490
/*
 * Handle the EDID-read automated test request (DP CTS 1.2 Core r1.1,
 * 4.2.2.x).
 *
 * If the prior EDID read failed (NACK/DEFER) or was corrupt, request the
 * failsafe test resolution; otherwise write the checksum of the last EDID
 * block back to the sink and request the preferred resolution.
 *
 * Returns DP_TEST_ACK, optionally OR'd with DP_TEST_EDID_CHECKSUM_WRITE
 * when the checksum was reported.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4534
830de422 4535static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4536{
830de422 4537 u8 test_result = DP_TEST_NAK;
c5d5ab7a
TP
4538 return test_result;
4539}
4540
/*
 * Dispatch an automated test request (DP_AUTOMATED_TEST_REQUEST) from the
 * sink to the matching autotest handler and report the result.
 *
 * The response (ACK/NAK) is always written back to DP_TEST_RESPONSE, even
 * when reading the request itself failed — hence the goto on read failure.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Remember which test was ACKed so later paths can react to it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4583
0e32b39c
DA
/*
 * Service MST sink interrupts: read the ESI (Event Status Indicator)
 * DPCD block, retrain the link if channel EQ was lost, and forward the
 * events to the MST topology manager.
 *
 * Loops (via the go_again label) as long as the topology manager handled
 * an event and another ESI read succeeds, acking each serviced event by
 * writing back to DP_SINK_COUNT_ESI+1 (up to 3 write attempts).
 *
 * Returns the topology manager's result, or -EINVAL when the port is not
 * in MST mode or the ESI read fails (in which case MST is torn down).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile; re-read. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			/* ESI read failed: assume the device is gone, drop MST. */
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
4640
c85d200e
VS
/*
 * Decide whether the DP link needs retraining.
 *
 * Returns %false when retraining is either unnecessary (link never
 * trained, CR/EQ still ok) or unsafe/pointless (PSR active, link status
 * unreadable, cached link params no longer valid); %true only when the
 * trained link has demonstrably lost clock recovery or channel EQ.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
4674
c85d200e
VS
/*
 * Retrain the DP link for @encoder if needed, taking the required modeset
 * locks through @ctx.
 *
 * Bails out early (returning 0) when the connector is disconnected, has
 * no active CRTC, a commit is still in flight, or no retrain is needed.
 * May return -EDEADLK from drm_modeset_lock(); the caller is expected to
 * back off and retry (see intel_dp_hotplug()).  Locks taken via @ctx are
 * released by the caller, which is why early returns don't unlock.
 *
 * FIFO underrun reporting is suppressed around the retrain since link
 * training transiently disturbs the pipe.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race an in-flight commit; it will (re)train anyway. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
4739
c85d200e
VS
/*
 * If display is now connected check links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static bool intel_dp_hotplug(struct intel_encoder *encoder,
			     struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	bool changed;
	int ret;

	/* Run the generic hotplug handling first. */
	changed = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry the retrain on lock contention (-EDEADLK) until it settles. */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);

	return changed;
}
4780
9844bc87
DP
/*
 * Read and handle the sink's DEVICE_SERVICE_IRQ_VECTOR (DPCD >= 1.1).
 *
 * The vector is acked by writing the same value back before dispatching:
 * automated test requests, HDCP CP_IRQ, and (unhandled) sink-specific
 * interrupts.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	/* The IRQ vector register only exists from DPCD 1.1 onwards. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the pending interrupts by writing the vector back. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}
4803
a4fc5ed6
KP
/*
 * According to DP spec
 * 5.1.2:
 *     1. Read DPCD
 *     2. Configure link according to Receiver Capabilities
 *     3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *     4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	/* Snapshot the sink count so we can detect a change below. */
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* May dispatch automated test requests / HDCP CP_IRQ. */
	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
a4fc5ed6 4862
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from DPCD contents.
 *
 * eDP is always connected once the DPCD reads back.  For branch devices
 * the result depends on SINK_COUNT (HPD-aware, DPCD >= 1.1), MST
 * capability, or a gentle DDC probe; unreliable downstream port types
 * (VGA / NON_EDID) report connector_status_unknown.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* Make sure an active LSPCON adapter is awake before AUX traffic. */
	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP panels are fixed; a readable DPCD means connected. */
	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4917
d410b56d
CW
4918static enum drm_connector_status
4919edp_detect(struct intel_dp *intel_dp)
4920{
b93b41af 4921 return connector_status_connected;
d410b56d
CW
4922}
4923
/* Live HPD state for IBX PCH ports, read from the SDEISR register. */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4946
/* Live HPD state for CPT PCH ports (CPT-specific SDEISR bit layout). */
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4969
7533eb4f 4970static bool spt_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 4971{
7533eb4f 4972 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
93e5f0b6
VS
4973 u32 bit;
4974
7533eb4f
RV
4975 switch (encoder->hpd_pin) {
4976 case HPD_PORT_A:
93e5f0b6
VS
4977 bit = SDE_PORTA_HOTPLUG_SPT;
4978 break;
7533eb4f 4979 case HPD_PORT_E:
a78695d3
JN
4980 bit = SDE_PORTE_HOTPLUG_SPT;
4981 break;
0df53b77 4982 default:
7533eb4f 4983 return cpt_digital_port_connected(encoder);
b93433cc 4984 }
1b469639 4985
b93433cc 4986 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4987}
4988
7533eb4f 4989static bool g4x_digital_port_connected(struct intel_encoder *encoder)
a4fc5ed6 4990{
7533eb4f 4991 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9642c81c 4992 u32 bit;
5eb08b69 4993
7533eb4f
RV
4994 switch (encoder->hpd_pin) {
4995 case HPD_PORT_B:
9642c81c
JN
4996 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4997 break;
7533eb4f 4998 case HPD_PORT_C:
9642c81c
JN
4999 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5000 break;
7533eb4f 5001 case HPD_PORT_D:
9642c81c
JN
5002 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5003 break;
5004 default:
7533eb4f 5005 MISSING_CASE(encoder->hpd_pin);
9642c81c
JN
5006 return false;
5007 }
5008
5009 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5010}
5011
/* Live HPD state for GM45 (different live-status bits than plain G4x). */
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
5034
7533eb4f 5035static bool ilk_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5036{
7533eb4f
RV
5037 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5038
5039 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5040 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5041 else
7533eb4f 5042 return ibx_digital_port_connected(encoder);
93e5f0b6
VS
5043}
5044
/* Live HPD state on SNB: DP A in DEISR, other ports behind the CPT PCH. */
static bool snb_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
	else
		return cpt_digital_port_connected(encoder);
}
5054
7533eb4f 5055static bool ivb_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5056{
7533eb4f
RV
5057 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5058
5059 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5060 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5061 else
7533eb4f 5062 return cpt_digital_port_connected(encoder);
93e5f0b6
VS
5063}
5064
/* Live HPD state on BDW: DP A in GEN8_DE_PORT_ISR, rest via CPT PCH. */
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
	else
		return cpt_digital_port_connected(encoder);
}
5074
/* Live HPD state for BXT DDI A-C, read from GEN8_DE_PORT_ISR. */
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case HPD_PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case HPD_PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
5097
b9fcddab
PZ
5098static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5099 struct intel_digital_port *intel_dig_port)
5100{
5101 enum port port = intel_dig_port->base.port;
5102
5103 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5104}
5105
/* Human-readable name of a Type-C port type, for debug messages. */
static const char *tc_type_name(enum tc_port_type type)
{
	static const char * const names[] = {
		[TC_PORT_UNKNOWN] = "unknown",
		[TC_PORT_LEGACY] = "legacy",
		[TC_PORT_TYPEC] = "typec",
		[TC_PORT_TBT] = "tbt",
	};

	/* Warn on out-of-range values and fall back to "unknown". */
	if (WARN_ON(type >= ARRAY_SIZE(names)))
		type = TC_PORT_UNKNOWN;

	return names[type];
}
5120
/*
 * Record the detected Type-C port type (legacy / typec / tbt) on the
 * digital port.  Exactly one of the flags must be set; types are not
 * supposed to change at runtime, so a transition from an already-known
 * type triggers a WARN.
 */
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
				    struct intel_digital_port *intel_dig_port,
				    bool is_legacy, bool is_typec, bool is_tbt)
{
	enum port port = intel_dig_port->base.port;
	enum tc_port_type old_type = intel_dig_port->tc_type;

	WARN_ON(is_legacy + is_typec + is_tbt != 1);

	if (is_legacy)
		intel_dig_port->tc_type = TC_PORT_LEGACY;
	else if (is_typec)
		intel_dig_port->tc_type = TC_PORT_TYPEC;
	else if (is_tbt)
		intel_dig_port->tc_type = TC_PORT_TBT;
	else
		return;

	/* Types are not supposed to be changed at runtime. */
	WARN_ON(old_type != TC_PORT_UNKNOWN &&
		old_type != intel_dig_port->tc_type);

	if (old_type != intel_dig_port->tc_type)
		DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
			      tc_type_name(intel_dig_port->tc_type));
}
5147
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 *
 * We could opt to only do the connect flow when we actually try to use the AUX
 * channels or do a modeset, then immediately run the disconnect flow after
 * usage, but there are some implications on this for a dynamic environment:
 * things may go away or change behind our backs. So for now our driver is
 * always trying to acquire ownership of the controller as soon as it gets an
 * interrupt (or polls state and sees a port is connected) and only gives it
 * back when it sees a disconnect. Implementation of a more fine-grained model
 * will require a lot of coordination with user space and thorough testing for
 * the extra possible cases.
 */
static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
			       struct intel_digital_port *dig_port)
{
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 val;

	/* Only legacy and typec ports need the FIA handshake. */
	if (dig_port->tc_type != TC_PORT_LEGACY &&
	    dig_port->tc_type != TC_PORT_TYPEC)
		return true;

	val = I915_READ(PORT_TX_DFLEXDPPMS);
	if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
		DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
		WARN_ON(dig_port->tc_legacy_port);
		return false;
	}

	/*
	 * This function may be called many times in a row without an HPD event
	 * in between, so try to avoid the write when we can.
	 */
	val = I915_READ(PORT_TX_DFLEXDPCSSS);
	if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (dig_port->tc_type == TC_PORT_TYPEC &&
	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
		icl_tc_phy_disconnect(dev_priv, dig_port);
		return false;
	}

	return true;
}
5209
5210/*
5211 * See the comment at the connect function. This implements the Disconnect
5212 * Flow.
5213 */
f6bff60e
ID
5214void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5215 struct intel_digital_port *dig_port)
39d1e234
PZ
5216{
5217 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
39d1e234 5218
b298ba5f 5219 if (dig_port->tc_type == TC_PORT_UNKNOWN)
39d1e234
PZ
5220 return;
5221
5222 /*
b298ba5f
JRS
5223 * TBT disconnection flow is read the live status, what was done in
5224 * caller.
39d1e234 5225 */
b298ba5f
JRS
5226 if (dig_port->tc_type == TC_PORT_TYPEC ||
5227 dig_port->tc_type == TC_PORT_LEGACY) {
5228 u32 val;
5229
5230 val = I915_READ(PORT_TX_DFLEXDPCSSS);
39d1e234
PZ
5231 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5232 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5233 }
b298ba5f 5234
f0236a85
ID
5235 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5236 port_name(dig_port->base.port),
5237 tc_type_name(dig_port->tc_type));
5238
b298ba5f 5239 dig_port->tc_type = TC_PORT_UNKNOWN;
39d1e234
PZ
5240}
5241
5242/*
5243 * The type-C ports are different because even when they are connected, they may
5244 * not be available/usable by the graphics driver: see the comment on
5245 * icl_tc_phy_connect(). So in our driver instead of adding the additional
5246 * concept of "usable" and make everything check for "connected and usable" we
5247 * define a port as "connected" when it is not only connected, but also when it
5248 * is usable by the rest of the driver. That maintains the old assumption that
5249 * connected ports are usable, and avoids exposing to the users objects they
5250 * can't really use.
5251 */
b9fcddab
PZ
5252static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5253 struct intel_digital_port *intel_dig_port)
5254{
5255 enum port port = intel_dig_port->base.port;
5256 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5257 bool is_legacy, is_typec, is_tbt;
5258 u32 dpsp;
5259
2a041c97
ID
5260 /*
5261 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5262 * legacy. Treat the port as legacy from now on.
5263 */
5264 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5265 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5266 intel_dig_port->tc_legacy_port = true;
5267 is_legacy = intel_dig_port->tc_legacy_port;
b9fcddab
PZ
5268
5269 /*
5270 * The spec says we shouldn't be using the ISR bits for detecting
5271 * between TC and TBT. We should use DFLEXDPSP.
5272 */
5273 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5274 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5275 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5276
39d1e234
PZ
5277 if (!is_legacy && !is_typec && !is_tbt) {
5278 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
f6bff60e 5279
6075546f 5280 return false;
39d1e234 5281 }
6075546f
PZ
5282
5283 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5284 is_tbt);
b9fcddab 5285
39d1e234
PZ
5286 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5287 return false;
5288
6075546f 5289 return true;
b9fcddab
PZ
5290}
5291
5292static bool icl_digital_port_connected(struct intel_encoder *encoder)
5293{
5294 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5295 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5296
c0aa8344 5297 if (intel_port_is_combophy(dev_priv, encoder->port))
b9fcddab 5298 return icl_combo_port_connected(dev_priv, dig_port);
c0aa8344 5299 else if (intel_port_is_tc(dev_priv, encoder->port))
b9fcddab 5300 return icl_tc_port_connected(dev_priv, dig_port);
c0aa8344 5301 else
b9fcddab 5302 MISSING_CASE(encoder->hpd_pin);
c0aa8344
MK
5303
5304 return false;
b9fcddab
PZ
5305}
5306
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* GMCH platforms first, then dispatch by generation, newest first. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		return icl_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
		return spt_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 8))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 7))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 6))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 5))
		return ilk_digital_port_connected(encoder);

	MISSING_CASE(INTEL_GEN(dev_priv));
	return false;
}
5347
/*
 * Return a freshly-allocated copy of the sink's EDID, either duplicated from
 * the connector's cached EDID (eDP fixed panels) or read over the AUX/DDC
 * channel. Returns NULL if the cached EDID is an error pointer. Caller owns
 * the returned EDID and must kfree() it.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
8c241fef 5364
/*
 * (Re)populate the connector's detect_edid from the sink, and derive the
 * audio capability and CEC state from it. Any previously detected EDID is
 * dropped first via intel_dp_unset_edid().
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	/* drm_detect_monitor_audio() and CEC both tolerate a NULL edid. */
	intel_dp->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
5378
/*
 * Drop the cached detect-time EDID and the state derived from it
 * (CEC registration, audio capability).
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
d6f24d0f 5390
/*
 * Connector ->detect_ctx hook: determine whether a DP/eDP sink is present.
 *
 * Takes an AUX power domain reference for the duration of the probe. On a
 * positive detection it also refreshes link parameters, DSC/branch-device
 * caps, MST state, and the cached EDID. Returns a drm_connector_status
 * value, or a negative error code propagated from link retraining.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* The caller must hold the connection_mutex for ->detect_ctx. */
	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget stale compliance/DSC state from the previous sink. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		/* Tear down MST if the topology vanished underneath us. */
		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Re-derive max link rate/lane count after a long HPD or reset. */
	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret) {
			/* Early return: drop the wakeref taken above. */
			intel_display_power_put(dev_priv, aux_domain, wakeref);
			return ret;
		}
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	/* eDP counts as connected even without an EDID. */
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
	return status;
}
5502
/*
 * Connector ->force hook: re-read the EDID for a connector whose status was
 * forced from user space. Only re-probes the EDID when the connector is
 * currently marked connected; needs an AUX power reference for the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
5527
/*
 * Connector ->get_modes hook: populate the mode list from the EDID cached
 * at detect time, falling back to the fixed panel mode for eDP panels
 * without an EDID. Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
5555
/*
 * Connector ->late_register hook: register the base connector, add its
 * debugfs entries, then register the DP AUX channel and (on success) the
 * CEC adapter on top of it. Returns 0 or a negative error code.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	i915_debugfs_connector_add(connector);

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name, connector->kdev->kobj.name);

	/* Parent the AUX device under the connector's sysfs device. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux,
					      connector->name, dev->dev);
	return ret;
}
5579
/*
 * Connector ->early_unregister hook: tear down in reverse order of
 * intel_dp_connector_register() — CEC first, then the AUX channel, then
 * the base connector.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
5589
/*
 * Flush and tear down the encoder's asynchronous state: MST topology,
 * delayed eDP VDD-off work, the reboot notifier, and the AUX channel.
 * Shared by encoder destroy and driver unload paths.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
5615
/*
 * Encoder ->destroy hook: flush pending work, release DRM encoder state,
 * and free the containing digital port.
 */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(encoder));
}
5623
/*
 * Suspend-time hook: for eDP, cancel the delayed VDD-off work and force
 * VDD off synchronously so the panel power sequencer is quiesced before
 * the system sleeps. No-op for external DP.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
5640
/*
 * Wait up to @timeout ms for a CP_IRQ to arrive, detected as a change of
 * hdcp->cp_irq_count relative to the value cached before the transaction.
 * On timeout only a debug message is logged; callers re-check message
 * availability themselves.
 */
static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
	long ret;

/* NOTE(review): macro C is never #undef'd after use — harmless here but leaks into the rest of the TU. */
#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
					       msecs_to_jiffies(timeout));

	if (!ret)
		DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
}
5652
/*
 * HDCP 1.x: write An to the sink over AUX, then trigger the hardware to
 * emit the (software-inaccessible) Aksv using the AKSV_SELECT AUX mode.
 * Returns 0 on success or a negative error code.
 */
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
			      dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	/* The native AUX reply code lives in the high nibble of byte 0. */
	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	if (reply != DP_AUX_NATIVE_REPLY_ACK) {
		DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
			      reply);
		return -EIO;
	}
	return 0;
}
5703
/* HDCP 1.x: read the sink's Bksv (5 bytes) from its DPCD. */
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
				   u8 *bksv)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
			       DRM_HDCP_KSV_LEN);
	if (ret != DRM_HDCP_KSV_LEN) {
		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5716
/* HDCP 1.x: read the repeater topology info (BINFO on DP, BSTATUS on HDMI). */
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
				      u8 *bstatus)
{
	ssize_t ret;
	/*
	 * For some reason the HDMI and DP HDCP specs call this register
	 * definition by different names. In the HDMI spec, it's called BSTATUS,
	 * but in DP it's called BINFO.
	 */
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
			       bstatus, DRM_HDCP_BSTATUS_LEN);
	if (ret != DRM_HDCP_BSTATUS_LEN) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5734
/* HDCP 1.x: read the single Bcaps byte from the sink's DPCD. */
static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
			     u8 *bcaps)
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
			       bcaps, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}
5750
/* HDCP 1.x: report whether the sink is an HDCP repeater (Bcaps bit). */
static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
				   bool *repeater_present)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
	return 0;
}
5765
/* HDCP 1.x: read Ri' (the sink's link-verification response) from DPCD. */
static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
				u8 *ri_prime)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
			       ri_prime, DRM_HDCP_RI_LEN);
	if (ret != DRM_HDCP_RI_LEN) {
		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5779
/* HDCP 1.x: report whether the repeater's KSV list is ready (Bstatus READY). */
static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
				 bool *ksv_ready)
{
	ssize_t ret;
	u8 bstatus;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	*ksv_ready = bstatus & DP_BSTATUS_READY;
	return 0;
}
5795
/*
 * HDCP 1.x: read the repeater's downstream KSV list. The DPCD exposes the
 * FIFO through a 15-byte window, so read up to 3 KSVs (5 bytes each) per
 * AUX transaction.
 */
static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
				int num_downstream, u8 *ksv_fifo)
{
	ssize_t ret;
	int i;

	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
	for (i = 0; i < num_downstream; i += 3) {
		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
				       DP_AUX_HDCP_KSV_FIFO,
				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
				       len);
		if (ret != len) {
			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
				      i, ret);
			return ret >= 0 ? -EIO : ret;
		}
	}
	return 0;
}
5818
/* HDCP 1.x: read one 4-byte part (index @i) of the repeater's V' value. */
static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
				    int i, u32 *part)
{
	ssize_t ret;

	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
		return -EINVAL;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_AUX_HDCP_V_PRIME(i), part,
			       DRM_HDCP_V_PRIME_PART_LEN);
	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5837
/* HDCP signalling toggling is only needed for HDMI/MST; SST DP is a no-op. */
static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}
5845
/*
 * HDCP 1.x link check: the link is good as long as Bstatus reports neither
 * LINK_FAILURE nor REAUTH_REQ. A failed AUX read also counts as a bad link.
 */
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
	ssize_t ret;
	u8 bstatus;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return false;
	}

	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
}
5861
/* HDCP 1.x: report whether the sink advertises HDCP capability in Bcaps. */
static
int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
			  bool *hdcp_capable)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
	return 0;
}
5876
/* Wire format for the DP errata "stream type" message (see config_stream_type). */
struct hdcp2_dp_errata_stream_type {
	u8 msg_id;
	u8 stream_type;
} __packed;

/*
 * Per-message table for the HDCP 2.2-over-DP transport: DPCD offset of each
 * message, whether its arrival is detectable via RxStatus, and the protocol
 * timeout(s) in ms (timeout2 covers the not-yet-paired H' case).
 */
static struct hdcp2_dp_msg_data {
	u8 msg_id;
	u32 offset;
	bool msg_detectable;
	u32 timeout;
	u32 timeout2; /* Added for non_paired situation */
	} hdcp2_msg_data[] = {
		{HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
		{HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
		 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
		{HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
		 false, 0, 0},
		{HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
		 false, 0, 0},
		{HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
		 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
		 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
		{HDCP_2_2_AKE_SEND_PAIRING_INFO,
		 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
		 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
		{HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
		{HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
		 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
		{HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
		 0, 0},
		{HDCP_2_2_REP_SEND_RECVID_LIST,
		 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
		 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
		{HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
		 0, 0},
		{HDCP_2_2_REP_STREAM_MANAGE,
		 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
		 0, 0},
		{HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
		 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
		{HDCP_2_2_ERRATA_DP_STREAM_TYPE,
		 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
		 0, 0},
	};
5923
5924static inline
5925int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5926 u8 *rx_status)
5927{
5928 ssize_t ret;
5929
5930 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5931 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5932 HDCP_2_2_DP_RXSTATUS_LEN);
5933 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5934 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5935 return ret >= 0 ? -EIO : ret;
5936 }
5937
5938 return 0;
5939}
5940
/*
 * HDCP 2.2: check RxStatus to see whether the given detectable message
 * (H', pairing info, or receiver-ID list) is available for reading.
 * Returns -EINVAL for a message whose availability RxStatus cannot signal.
 */
static
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
				  u8 msg_id, bool *msg_ready)
{
	u8 rx_status;
	int ret;

	*msg_ready = false;
	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
	if (ret < 0)
		return ret;

	switch (msg_id) {
	case HDCP_2_2_AKE_SEND_HPRIME:
		if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_AKE_SEND_PAIRING_INFO:
		if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_REP_SEND_RECVID_LIST:
		if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
			*msg_ready = true;
		break;
	default:
		DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
		return -EINVAL;
	}

	return 0;
}
5973
/*
 * HDCP 2.2: wait for a protocol message to become available. Messages whose
 * arrival RxStatus cannot signal are handled by simply waiting out the
 * protocol timeout; detectable ones wait for a CP_IRQ and then poll
 * RxStatus. Returns 0 when ready, -ETIMEDOUT otherwise.
 */
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
			    struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	u8 msg_id = hdcp2_msg_data->msg_id;
	int ret, timeout;
	bool msg_ready = false;

	/* H' has a longer timeout when the devices are not yet paired. */
	if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
		timeout = hdcp2_msg_data->timeout2;
	else
		timeout = hdcp2_msg_data->timeout;

	/*
	 * There is no way to detect the CERT, LPRIME and STREAM_READY
	 * availability. So Wait for timeout and read the msg.
	 */
	if (!hdcp2_msg_data->msg_detectable) {
		/* NOTE(review): mdelay() busy-waits for up to the full protocol timeout. */
		mdelay(timeout);
		ret = 0;
	} else {
		/*
		 * As we want to check the msg availability at timeout, Ignoring
		 * the timeout at wait for CP_IRQ.
		 */
		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
		ret = hdcp2_detect_msg_availability(intel_dig_port,
						    msg_id, &msg_ready);
		if (!msg_ready)
			ret = -ETIMEDOUT;
	}

	if (ret)
		DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
			      hdcp2_msg_data->msg_id, ret, timeout);

	return ret;
}
6014
/* Look up the hdcp2_msg_data table entry for @msg_id; NULL if unknown. */
static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
		if (hdcp2_msg_data[i].msg_id == msg_id)
			return &hdcp2_msg_data[i];

	return NULL;
}
6025
/*
 * HDCP 2.2: write a protocol message to the sink. The leading msg_id byte
 * selects the DPCD offset but is not itself transmitted (DP adaptation
 * messages carry no msg_id on the wire); the payload is chunked into
 * DP_AUX_MAX_PAYLOAD_BYTES writes. Returns @size on success.
 */
static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
			     void *buf, size_t size)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_write, len;
	struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
	if (!hdcp2_msg_data)
		return -EINVAL;

	offset = hdcp2_msg_data->offset;

	/* No msg_id in DP HDCP2.2 msgs */
	bytes_to_write = size - 1;
	byte++;

	/* Snapshot the CP_IRQ count so the reply wait can detect a new IRQ. */
	hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);

	while (bytes_to_write) {
		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;

		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
					offset, (void *)byte, len);
		if (ret < 0)
			return ret;

		bytes_to_write -= ret;
		byte += ret;
		offset += ret;
	}

	return size;
}
6065
/*
 * HDCP 2.2: compute the actual size of a REP_SEND_RECVID_LIST message by
 * reading RxInfo and scaling the receiver-ID portion by the (clamped)
 * downstream device count. Returns the byte count or a negative error.
 */
static
ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
{
	u8 rx_info[HDCP_2_2_RXINFO_LEN];
	u32 dev_cnt;
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
	if (ret != HDCP_2_2_RXINFO_LEN)
		return ret >= 0 ? -EIO : ret;

	/* Device count is split across the two RxInfo bytes. */
	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));

	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;

	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);

	return ret;
}
6091
/*
 * HDCP 2.2: read a protocol message from the sink. Waits for availability,
 * sizes the receiver-ID list dynamically, reads the payload in
 * DP_AUX_MAX_PAYLOAD_BYTES chunks, and re-inserts the msg_id as the first
 * byte of @buf (DP adaptation messages have no msg_id on the wire).
 * Returns the message size or a negative error code.
 */
static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
			    u8 msg_id, void *buf, size_t size)
{
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_recv, len;
	struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
	if (!hdcp2_msg_data)
		return -EINVAL;
	offset = hdcp2_msg_data->offset;

	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
	if (ret < 0)
		return ret;

	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
		ret = get_receiver_id_list_size(intel_dig_port);
		if (ret < 0)
			return ret;

		size = ret;
	}
	bytes_to_recv = size - 1;

	/* DP adaptation msgs has no msg_id */
	byte++;

	while (bytes_to_recv) {
		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;

		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
				       (void *)byte, len);
		if (ret < 0) {
			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
			return ret;
		}

		bytes_to_recv -= ret;
		byte += ret;
		offset += ret;
	}

	/* Prepend the msg_id the wire format omitted. */
	byte = buf;
	*byte = msg_id;

	return size;
}
6142
/*
 * HDCP 2.2 DP errata: send the stream type to a (non-repeater) receiver so
 * it can decrypt the content; repeaters learn it later during their own
 * authentication.
 */
static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
				      bool is_repeater, u8 content_type)
{
	struct hdcp2_dp_errata_stream_type stream_type_msg;

	if (is_repeater)
		return 0;

	/*
	 * Errata for DP: As Stream type is used for encryption, Receiver
	 * should be communicated with stream type for the decryption of the
	 * content.
	 * Repeater will be communicated with stream type as a part of it's
	 * auth later in time.
	 */
	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
	stream_type_msg.stream_type = content_type;

	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
					sizeof(stream_type_msg));
}
6165
/*
 * HDCP 2.2 link check: map the RxStatus bits to the generic HDCP result
 * codes (reauth request, link integrity failure, or topology change).
 * Returns 0 when none of those bits are set.
 */
static
int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
{
	u8 rx_status;
	int ret;

	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
	if (ret)
		return ret;

	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		ret = HDCP_REAUTH_REQUEST;
	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
		ret = HDCP_LINK_INTEGRITY_FAILURE;
	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
		ret = HDCP_TOPOLOGY_CHANGE;

	return ret;
}
6185
/*
 * HDCP 2.2: report whether the sink is HDCP 2.2 capable, based on the
 * RxCaps version byte and the HDCP_CAPABLE bit.
 */
static
int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
			   bool *capable)
{
	u8 rx_caps[3];
	int ret;

	*capable = false;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
			       rx_caps, HDCP_2_2_RXCAPS_LEN);
	if (ret != HDCP_2_2_RXCAPS_LEN)
		return ret >= 0 ? -EIO : ret;

	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
		*capable = true;

	return 0;
}
6206
/* HDCP transport ops for DP connectors (both 1.x and 2.2 entry points). */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
	.write_2_2_msg = intel_dp_hdcp2_write_msg,
	.read_2_2_msg = intel_dp_hdcp2_read_msg,
	.config_stream_type = intel_dp_hdcp2_config_stream_type,
	.check_2_2_link = intel_dp_hdcp2_check_link,
	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
	.protocol = HDCP_PROTOCOL_DP,
};
6226
/*
 * If the BIOS left panel VDD enabled at boot/resume, take the matching AUX
 * power-domain reference our state tracking expects and schedule a delayed
 * VDD off so the reference isn't held forever. Caller holds pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6248
/*
 * VLV/CHV: return the pipe currently driving this DP port according to the
 * port register, or INVALID_PIPE when the port is disabled.
 */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
6261
/*
 * Encoder ->reset hook (boot/resume): resynchronize software state with the
 * hardware — cached port register, LSPCON, link parameters, the VLV/CHV
 * active pipe, and the eDP power sequencer / VDD tracking.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	/* On DDI platforms the DP register value is not cached this way. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Force the next detect to re-derive max link rate/lane count. */
	intel_dp->reset_link_params = true;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
6291
/* DRM connector ops for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
6303
/* Probe helpers: context-aware detect, mode enumeration and validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};
6310
/* DRM encoder ops for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6315
/*
 * Hotplug IRQ handler for a DP digital port.
 *
 * Long pulses on eDP are ignored (they can be self-induced by VDD off and
 * would loop forever); other long pulses just flag the link parameters for
 * re-derivation and defer to the full detect path (IRQ_NONE). Short pulses
 * are serviced here under an AUX power reference: MST ports process their
 * sideband messages (dropping out of MST if the device vanished), SST ports
 * run intel_dp_short_pulse(). Returns IRQ_HANDLED when fully serviced.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum irqreturn ret = IRQ_NONE;
	intel_wakeref_t wakeref;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(intel_dig_port));

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			goto put_power;
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(intel_dig_port),
				wakeref);

	return ret;
}
6381
477ec328 6382/* check the VBT to see whether the eDP is on another port */
7b91bf7f 6383bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
36e83a18 6384{
53ce81a7
VS
 6385	/*
 6386	 * eDP not supported on g4x. so bail out early just
 6387	 * for a bit extra safety in case the VBT is bonkers.
 6388	 */
dd11bc10 6389	if (INTEL_GEN(dev_priv) < 5)
53ce81a7
VS
 6390		return false;
 6391
	/* Before gen9, port A is treated as eDP unconditionally. */
a98d9c1d 6392	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
3b32a35b
VS
 6393		return true;
 6394
 6395	return intel_bios_is_port_edp(dev_priv, port);
36e83a18
ZY
 6396}
6397
200819ab 6398static void
f684960e
CW
/*
 * Attach the connector properties a DP/eDP connector supports: force-audio
 * (not on g4x / port A), broadcast RGB, max bpc (range depends on platform),
 * and for eDP the allowed panel-fitter scaling modes.
 */
 6399intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 6400{
8b45330a 6401	struct drm_i915_private *dev_priv = to_i915(connector->dev);
68ec0736
VS
 6402	enum port port = dp_to_dig_port(intel_dp)->base.port;
 6403
 6404	if (!IS_G4X(dev_priv) && port != PORT_A)
 6405		intel_attach_force_audio_property(connector);
8b45330a 6406
e953fd7b 6407	intel_attach_broadcast_rgb_property(connector);
b2ae318a 6408	if (HAS_GMCH(dev_priv))
f1a12172
RS
 6409		drm_connector_attach_max_bpc_property(connector, 6, 10);
 6410	else if (INTEL_GEN(dev_priv) >= 5)
 6411		drm_connector_attach_max_bpc_property(connector, 6, 12);
53b41837 6412
 6413	if (intel_dp_is_edp(intel_dp)) {
8b45330a
ML
 6414		u32 allowed_scalers;
 6415
 6416		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
b2ae318a 6417		if (!HAS_GMCH(dev_priv))
8b45330a
ML
 6418			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
 6419
 6420		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
 6421
 6422		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
8b45330a 6423
53b41837 6424	}
f684960e
CW
 6425}
6426
dada1a9f
ID
/*
 * Seed the panel power bookkeeping timestamps. The power-off time uses the
 * boottime clock, which keeps advancing across suspend, so panel power cycle
 * delays are honoured over a suspend/resume.
 */
 6427static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 6428{
d28d4731 6429	intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
 6430	intel_dp->last_power_on = jiffies;
 6431	intel_dp->last_backlight_off = jiffies;
 6432}
6433
67a54566 6434static void
/*
 * Read the current panel power sequencer delays (T1-T3, T8, T9, T10,
 * T11-T12) out of the PPS registers into @seq, in the hardware's 100 usec
 * units. On platforms without a separate divisor register the power cycle
 * delay lives in the control register instead.
 */
 6435intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
67a54566 6436{
de25eb7f 6437	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ab3517c1 6438	u32 pp_on, pp_off, pp_ctl;
8e8232d5 6439	struct pps_registers regs;
453c5420 6440
46bd8383 6441	intel_pps_get_registers(intel_dp, &regs);
67a54566 6442
b0a08bec 6443	pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 6444
1b61c4a3
JN
 6445	/* Ensure PPS is unlocked */
 6446	if (!HAS_DDI(dev_priv))
 6447		I915_WRITE(regs.pp_ctrl, pp_ctl);
 6448
8e8232d5
ID
 6449	pp_on = I915_READ(regs.pp_on);
 6450	pp_off = I915_READ(regs.pp_off);
67a54566
DV
 6451
 6452	/* Pull timing values out of registers */
78b36b10
JN
 6453	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
 6454	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
 6455	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
 6456	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
67a54566 6457
ab3517c1
JN
 6458	if (i915_mmio_reg_valid(regs.pp_div)) {
 6459		u32 pp_div;
 6460
 6461		pp_div = I915_READ(regs.pp_div);
 6462
		/* Register holds units of 100 msec; convert to 100 usec. */
78b36b10 6463		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
ab3517c1 6464	} else {
78b36b10 6465		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
b0a08bec 6466	}
54648618
ID
 6467}
6468
de9c1b6b
ID
/* Dump one set of PPS delays (in 100 usec units) for debugging. */
 6469static void
 6470intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
 6471{
 6472	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 6473		      state_name,
 6474		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
 6475}
6476
 6477static void
/*
 * Cross-check the software copy of the PPS delays against what the hardware
 * registers currently contain, and complain loudly on any mismatch.
 */
 6478intel_pps_verify_state(struct intel_dp *intel_dp)
de9c1b6b
ID
 6479{
 6480	struct edp_power_seq hw;
 6481	struct edp_power_seq *sw = &intel_dp->pps_delays;
 6482
 6483	intel_pps_readout_hw_state(intel_dp, &hw);
de9c1b6b
ID
 6484
 6485	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
 6486	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
 6487		DRM_ERROR("PPS state mismatch\n");
 6488		intel_pps_dump_state("sw", sw);
 6489		intel_pps_dump_state("hw", &hw);
 6490	}
 6491}
6492
54648618 6493static void
/*
 * Compute the final panel power sequencer delays: take the max of the
 * current hardware values and the VBT, falling back to the eDP spec limits
 * when both are unset. Must be called with pps_mutex held; a non-zero
 * t11_t12 is used as the "already initialized" marker.
 */
 6494intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
54648618 6495{
de25eb7f 6496	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
54648618
ID
 6497	struct edp_power_seq cur, vbt, spec,
 6498			     *final = &intel_dp->pps_delays;
 6499
 6500	lockdep_assert_held(&dev_priv->pps_mutex);
 6501
 6502	/* already initialized? */
 6503	if (final->t11_t12 != 0)
 6504		return;
 6505
 6506	intel_pps_readout_hw_state(intel_dp, &cur);
67a54566 6507
de9c1b6b 6508	intel_pps_dump_state("cur", &cur);
67a54566 6509
6aa23e65 6510	vbt = dev_priv->vbt.edp.pps;
c99a259b
MN
 6511	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
 6512	 * of 500ms appears to be too short. Occasionally the panel
 6513	 * just fails to power back on. Increasing the delay to 800ms
 6514	 * seems sufficient to avoid this problem.
 6515	 */
 6516	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
7313f5a9 6517		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
c99a259b
MN
 6518		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
 6519			      vbt.t11_t12);
 6520	}
770a17a5
MN
 6521	/* T11_T12 delay is special and actually in units of 100ms, but zero
 6522	 * based in the hw (so we need to add 100 ms). But the sw vbt
 6523	 * table multiplies it with 1000 to make it in units of 100usec,
 6524	 * too. */
 6525	vbt.t11_t12 += 100 * 10;
67a54566
DV
 6526
 6527	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
 6528	 * our hw here, which are all in 100usec. */
 6529	spec.t1_t3 = 210 * 10;
 6530	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
 6531	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
 6532	spec.t10 = 500 * 10;
 6533	/* This one is special and actually in units of 100ms, but zero
 6534	 * based in the hw (so we need to add 100 ms). But the sw vbt
 6535	 * table multiplies it with 1000 to make it in units of 100usec,
 6536	 * too. */
 6537	spec.t11_t12 = (510 + 100) * 10;
 6538
de9c1b6b 6539	intel_pps_dump_state("vbt", &vbt);
67a54566
DV
 6540
 6541	/* Use the max of the register settings and vbt. If both are
 6542	 * unset, fall back to the spec limits. */
36b5f425 6543#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
 6544				       spec.field : \
 6545				       max(cur.field, vbt.field))
 6546	assign_final(t1_t3);
 6547	assign_final(t8);
 6548	assign_final(t9);
 6549	assign_final(t10);
 6550	assign_final(t11_t12);
 6551#undef assign_final
 6552
	/* Convert the 100 usec register units to milliseconds, rounding up. */
36b5f425 6553#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
67a54566
DV
 6554	intel_dp->panel_power_up_delay = get_delay(t1_t3);
 6555	intel_dp->backlight_on_delay = get_delay(t8);
 6556	intel_dp->backlight_off_delay = get_delay(t9);
 6557	intel_dp->panel_power_down_delay = get_delay(t10);
 6558	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 6559#undef get_delay
 6560
f30d26e4
JN
 6561	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
 6562		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
 6563		      intel_dp->panel_power_cycle_delay);
 6564
 6565	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
 6566		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
de9c1b6b
ID
 6567
 6568	/*
 6569	 * We override the HW backlight delays to 1 because we do manual waits
 6570	 * on them. For T8, even BSpec recommends doing it. For T9, if we
 6571	 * don't do this, we'll end up waiting for the backlight off delay
 6572	 * twice: once when we do the manual sleep, and once when we disable
 6573	 * the panel and wait for the PP_STATUS bit to become zero.
 6574	 */
 6575	final->t8 = 1;
 6576	final->t9 = 1;
5643205c
ID
 6577
 6578	/*
 6579	 * HW has only a 100msec granularity for t11_t12 so round it up
 6580	 * accordingly.
 6581	 */
 6582	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
f30d26e4
JN
 6583}
6584
 6585static void
/*
 * Program the computed PPS delays (intel_dp->pps_delays) into the hardware
 * registers, including the panel port select bits on platforms that have
 * them and the pp clock divisor. Caller holds pps_mutex.
 * @force_disable_vdd: clear a BIOS-left-on VDD first (see comment below).
 */
 6586intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 6587					      bool force_disable_vdd)
f30d26e4 6588{
de25eb7f 6589	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ab3517c1 6590	u32 pp_on, pp_off, port_sel = 0;
e7dc33f3 6591	int div = dev_priv->rawclk_freq / 1000;
8e8232d5 6592	struct pps_registers regs;
8f4f2797 6593	enum port port = dp_to_dig_port(intel_dp)->base.port;
36b5f425 6594	const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 6595
e39b999a 6596	lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 6597
46bd8383 6598	intel_pps_get_registers(intel_dp, &regs);
453c5420 6599
5d5ab2d2
VS
 6600	/*
 6601	 * On some VLV machines the BIOS can leave the VDD
 6602	 * enabled even on power sequencers which aren't
 6603	 * hooked up to any port. This would mess up the
 6604	 * power domain tracking the first time we pick
 6605	 * one of these power sequencers for use since
 6606	 * edp_panel_vdd_on() would notice that the VDD was
 6607	 * already on and therefore wouldn't grab the power
 6608	 * domain reference. Disable VDD first to avoid this.
 6609	 * This also avoids spuriously turning the VDD on as
 6610	 * soon as the new power sequencer gets initialized.
 6611	 */
 6612	if (force_disable_vdd) {
 6613		u32 pp = ironlake_get_pp_control(intel_dp);
 6614
 6615		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
 6616
 6617		if (pp & EDP_FORCE_VDD)
 6618			DRM_DEBUG_KMS("VDD already on, disabling first\n");
 6619
 6620		pp &= ~EDP_FORCE_VDD;
 6621
 6622		I915_WRITE(regs.pp_ctrl, pp);
 6623	}
 6624
78b36b10
JN
 6625	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
 6626		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
 6627	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
 6628		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
67a54566
DV
 6629
 6630	/* Haswell doesn't have any port selection bits for the panel
 6631	 * power sequencer any more. */
920a14b2 6632	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ad933b56 6633		port_sel = PANEL_PORT_SELECT_VLV(port);
6e266956 6634	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
05bf51d3
VS
 6635		switch (port) {
 6636		case PORT_A:
a24c144c 6637			port_sel = PANEL_PORT_SELECT_DPA;
05bf51d3
VS
 6638			break;
 6639		case PORT_C:
 6640			port_sel = PANEL_PORT_SELECT_DPC;
 6641			break;
 6642		case PORT_D:
a24c144c 6643			port_sel = PANEL_PORT_SELECT_DPD;
05bf51d3
VS
 6644			break;
 6645		default:
 6646			MISSING_CASE(port);
 6647			break;
 6648		}
67a54566
DV
 6649	}
 6650
453c5420
JB
 6651	pp_on |= port_sel;
 6652
8e8232d5
ID
 6653	I915_WRITE(regs.pp_on, pp_on);
 6654	I915_WRITE(regs.pp_off, pp_off);
ab3517c1
JN
 6655
 6656	/*
 6657	 * Compute the divisor for the pp clock, simply match the Bspec formula.
 6658	 */
 6659	if (i915_mmio_reg_valid(regs.pp_div)) {
78b36b10
JN
 6660		I915_WRITE(regs.pp_div,
 6661			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
 6662			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
ab3517c1
JN
 6663	} else {
 6664		u32 pp_ctl;
 6665
		/* No divisor register: power cycle delay lives in pp_ctrl. */
 6666		pp_ctl = I915_READ(regs.pp_ctrl);
 6667		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
78b36b10 6668		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
ab3517c1
JN
 6669		I915_WRITE(regs.pp_ctrl, pp_ctl);
 6670	}
67a54566 6671
67a54566 6672	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
8e8232d5
ID
 6673		      I915_READ(regs.pp_on),
 6674		      I915_READ(regs.pp_off),
ab3517c1
JN
 6675		      i915_mmio_reg_valid(regs.pp_div) ?
 6676		      I915_READ(regs.pp_div) :
 6677		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
f684960e
CW
 6678}
6679
/*
 * Initialize the panel power sequencer: VLV/CHV need to pick a PPS instance
 * first; other platforms compute the delays and program the registers.
 */
46bd8383 6680static void intel_dp_pps_init(struct intel_dp *intel_dp)
335f752b 6681{
de25eb7f 6682	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
920a14b2
TU
 6683
 6684	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
335f752b
ID
 6685		vlv_initial_power_sequencer_setup(intel_dp);
 6686	} else {
46bd8383
VS
 6687		intel_dp_init_panel_power_sequencer(intel_dp);
 6688		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
335f752b
ID
 6689	}
 6690}
6691
b33a2815
VK
 6692/**
 6693 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 6694 * @dev_priv: i915 device
 6695 * @crtc_state: a pointer to the active intel_crtc_state
 6696 * @refresh_rate: RR to be programmed
 6697 *
 6698 * This function gets called when refresh rate (RR) has to be changed from
 6699 * one frequency to another. Switches can be between high and low RR
 6700 * supported by the panel or to any other RR based on media playback (in
 6701 * this case, RR value needs to be passed from user space).
 6702 *
 6703 * The caller of this function needs to take a lock on dev_priv->drrs.
 6704 */
85cb48a1 6705static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5f88a9c6 6706				    const struct intel_crtc_state *crtc_state,
85cb48a1 6707				    int refresh_rate)
439d7ac0 6708{
439d7ac0 6709	struct intel_encoder *encoder;
96178eeb
VK
 6710	struct intel_digital_port *dig_port = NULL;
 6711	struct intel_dp *intel_dp = dev_priv->drrs.dp;
85cb48a1 6712	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
96178eeb 6713	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
 6714
 6715	if (refresh_rate <= 0) {
 6716		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
 6717		return;
 6718	}
 6719
96178eeb
VK
 6720	if (intel_dp == NULL) {
 6721		DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
 6722		return;
 6723	}
 6724
96178eeb
VK
 6725	dig_port = dp_to_dig_port(intel_dp);
 6726	encoder = &dig_port->base;
439d7ac0
PB
 6727
 6728	if (!intel_crtc) {
 6729		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
 6730		return;
 6731	}
 6732
96178eeb 6733	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
 6734		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
 6735		return;
 6736	}
 6737
	/* Requested rate matching the downclock mode means the low-RR path. */
96178eeb
VK
 6738	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
 6739			refresh_rate)
439d7ac0
PB
 6740		index = DRRS_LOW_RR;
 6741
96178eeb 6742	if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
 6743		DRM_DEBUG_KMS(
 6744			"DRRS requested for previously set RR...ignoring\n");
 6745		return;
 6746	}
 6747
85cb48a1 6748	if (!crtc_state->base.active) {
439d7ac0
PB
 6749		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
 6750		return;
 6751	}
 6752
	/* Gen8+ (except CHV) switch via the M/N values; older gens via PIPECONF. */
85cb48a1 6753	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
a4c30b1d
VK
 6754		switch (index) {
 6755		case DRRS_HIGH_RR:
4c354754 6756			intel_dp_set_m_n(crtc_state, M1_N1);
a4c30b1d
VK
 6757			break;
 6758		case DRRS_LOW_RR:
4c354754 6759			intel_dp_set_m_n(crtc_state, M2_N2);
a4c30b1d
VK
 6760			break;
 6761		case DRRS_MAX_RR:
 6762		default:
 6763			DRM_ERROR("Unsupported refreshrate type\n");
 6764		}
85cb48a1
ML
 6765	} else if (INTEL_GEN(dev_priv) > 6) {
 6766		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
 6767		u32 val;
a4c30b1d 6768
 6769		val = I915_READ(reg);
439d7ac0 6770		if (index > DRRS_HIGH_RR) {
85cb48a1 6771			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
 6772				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 6773			else
 6774				val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 6775		} else {
85cb48a1 6776			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
 6777				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 6778			else
 6779				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
 6780		}
 6781		I915_WRITE(reg, val);
 6782	}
 6783
4e9ac947
VK
 6784	dev_priv->drrs.refresh_rate_type = index;
 6785
 6786	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
 6787}
6788
b33a2815
VK
 6789/**
 6790 * intel_edp_drrs_enable - init drrs struct if supported
 6791 * @intel_dp: DP struct
 6792 * @crtc_state: A pointer to the active crtc state.
 6793 *
 6794 * Initializes frontbuffer_bits and drrs.dp
 6795 */
85cb48a1 6796void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5f88a9c6 6797			   const struct intel_crtc_state *crtc_state)
c395578e 6798{
de25eb7f 6799	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 6800
85cb48a1 6801	if (!crtc_state->has_drrs) {
c395578e
VK
 6802		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
 6803		return;
 6804	}
 6805
	/* DRRS and PSR are mutually exclusive; PSR wins if already enabled. */
da83ef85
RS
 6806	if (dev_priv->psr.enabled) {
 6807		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
 6808		return;
 6809	}
 6810
c395578e 6811	mutex_lock(&dev_priv->drrs.mutex);
f69a0d71
HG
 6812	if (dev_priv->drrs.dp) {
 6813		DRM_DEBUG_KMS("DRRS already enabled\n");
c395578e
VK
 6814		goto unlock;
 6815	}
 6816
 6817	dev_priv->drrs.busy_frontbuffer_bits = 0;
 6818
 6819	dev_priv->drrs.dp = intel_dp;
 6820
 6821unlock:
 6822	mutex_unlock(&dev_priv->drrs.mutex);
 6823}
6824
b33a2815
VK
 6825/**
 6826 * intel_edp_drrs_disable - Disable DRRS
 6827 * @intel_dp: DP struct
 6828 * @old_crtc_state: Pointer to old crtc_state.
 6829 *
 6830 * Restores the fixed-mode (high) refresh rate if we were downclocked,
 6831 * clears drrs.dp and cancels any pending downclock work.
 6832 */
85cb48a1 6831void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5f88a9c6 6832			    const struct intel_crtc_state *old_crtc_state)
c395578e 6833{
de25eb7f 6834	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 6835
85cb48a1 6836	if (!old_crtc_state->has_drrs)
c395578e
VK
 6837		return;
 6838
 6839	mutex_lock(&dev_priv->drrs.mutex);
 6840	if (!dev_priv->drrs.dp) {
 6841		mutex_unlock(&dev_priv->drrs.mutex);
 6842		return;
 6843	}
 6844
 6845	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
 6846		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
 6847			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
c395578e
VK
 6848
 6849	dev_priv->drrs.dp = NULL;
 6850	mutex_unlock(&dev_priv->drrs.mutex);
 6851
	/* Wait for any in-flight downclock work after dropping the mutex. */
 6852	cancel_delayed_work_sync(&dev_priv->drrs.work);
 6853}
6854
4e9ac947
VK
/*
 * Delayed work that switches the panel to the low refresh rate once the
 * screen has been idle (no dirty frontbuffer bits) for the idle timeout.
 */
 6855static void intel_edp_drrs_downclock_work(struct work_struct *work)
 6856{
 6857	struct drm_i915_private *dev_priv =
 6858		container_of(work, typeof(*dev_priv), drrs.work.work);
 6859	struct intel_dp *intel_dp;
 6860
 6861	mutex_lock(&dev_priv->drrs.mutex);
 6862
 6863	intel_dp = dev_priv->drrs.dp;
 6864
 6865	if (!intel_dp)
 6866		goto unlock;
 6867
439d7ac0 6868	/*
4e9ac947
VK
 6869	 * The delayed work can race with an invalidate hence we need to
 6870	 * recheck.
 6871	 */
439d7ac0
PB
 6872
 6873	if (dev_priv->drrs.busy_frontbuffer_bits)
 6874		goto unlock;
439d7ac0 6875
85cb48a1
ML
 6876	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
 6877		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 6878
 6879		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
 6880			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
 6881	}
439d7ac0 6882
4e9ac947 6883unlock:
4e9ac947 6884	mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
 6885}
6886
b33a2815 6887/**
 6888 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 6889 * @dev_priv: i915 device
b33a2815
VK
 6890 * @frontbuffer_bits: frontbuffer plane tracking bits
 6891 *
0ddfd203
R
 6892 * This function gets called every time rendering on the given planes starts.
 6893 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
 6894 *
 6895 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 6896 */
5748b6a1
CW
 6897void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 6898			       unsigned int frontbuffer_bits)
a93fad0f 6899{
a93fad0f
VK
 6900	struct drm_crtc *crtc;
 6901	enum pipe pipe;
 6902
 6903	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
 6904		return;
 6905
	/* A new invalidate restarts the idleness clock; drop pending downclock. */
88f933a8 6906	cancel_delayed_work(&dev_priv->drrs.work);
3954e733 6907
a93fad0f 6908	mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
 6909	if (!dev_priv->drrs.dp) {
 6910		mutex_unlock(&dev_priv->drrs.mutex);
 6911		return;
 6912	}
 6913
a93fad0f
VK
 6914	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 6915	pipe = to_intel_crtc(crtc)->pipe;
 6916
c1d038c6
DV
 6917	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 6918	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
 6919
 6920	/* invalidate means busy screen hence upclock */
 6921	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
 6922		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
 6923			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
a93fad0f 6924
a93fad0f
VK
 6925	mutex_unlock(&dev_priv->drrs.mutex);
 6926}
6927
b33a2815 6928/**
 6929 * intel_edp_drrs_flush - Restart Idleness DRRS
 6930 * @dev_priv: i915 device
b33a2815
VK
 6931 * @frontbuffer_bits: frontbuffer plane tracking bits
 6932 *
0ddfd203
R
 6933 * This function gets called every time rendering on the given planes has
 6934 * completed or flip on a crtc is completed. So DRRS should be upclocked
 6935 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 6936 * if no other planes are dirty.
b33a2815
VK
 6937 *
 6938 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 6939 */
5748b6a1
CW
 6940void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 6941			  unsigned int frontbuffer_bits)
a93fad0f 6942{
a93fad0f
VK
 6943	struct drm_crtc *crtc;
 6944	enum pipe pipe;
 6945
 6946	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
 6947		return;
 6948
 6949	cancel_delayed_work(&dev_priv->drrs.work);
3954e733 6950
a93fad0f 6951	mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
 6952	if (!dev_priv->drrs.dp) {
 6953		mutex_unlock(&dev_priv->drrs.mutex);
 6954		return;
 6955	}
 6956
a93fad0f
VK
 6957	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 6958	pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
 6959
 6960	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
 6961	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 6962
 6963	/* flush means busy screen hence upclock */
 6964	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1
ML
 6965		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
 6966			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
0ddfd203
R
 6967
 6968	/*
 6969	 * flush also means no more activity hence schedule downclock, if all
 6970	 * other fbs are quiescent too
 6971	 */
 6972	if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
 6973		schedule_delayed_work(&dev_priv->drrs.work,
 6974				      msecs_to_jiffies(1000));
 6975	mutex_unlock(&dev_priv->drrs.mutex);
 6976}
6977
b33a2815
VK
6978/**
6979 * DOC: Display Refresh Rate Switching (DRRS)
6980 *
6981 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 6982 * which enables switching between low and high refresh rates,
6983 * dynamically, based on the usage scenario. This feature is applicable
6984 * for internal panels.
6985 *
6986 * Indication that the panel supports DRRS is given by the panel EDID, which
6987 * would list multiple refresh rates for one resolution.
6988 *
6989 * DRRS is of 2 types - static and seamless.
6990 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6991 * (may appear as a blink on screen) and is used in dock-undock scenario.
6992 * Seamless DRRS involves changing RR without any visual effect to the user
6993 * and can be used during normal system usage. This is done by programming
6994 * certain registers.
6995 *
6996 * Support for static/seamless DRRS may be indicated in the VBT based on
6997 * inputs from the panel spec.
6998 *
6999 * DRRS saves power by switching to low RR based on usage scenarios.
7000 *
2e7a5701
DV
7001 * The implementation is based on frontbuffer tracking implementation. When
7002 * there is a disturbance on the screen triggered by user activity or a periodic
7003 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7004 * no movement on screen, after a timeout of 1 second, a switch to low RR is
7005 * made.
7006 *
7007 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7008 * and intel_edp_drrs_flush() are called.
b33a2815
VK
7009 *
7010 * DRRS can be further extended to support other internal panels and also
7011 * the scenario of video playback wherein RR is set based on the rate
7012 * requested by userspace.
7013 */
7014
7015/**
7016 * intel_dp_drrs_init - Init basic DRRS work and mutex.
2f773477 7017 * @connector: eDP connector
b33a2815
VK
7018 * @fixed_mode: preferred mode of panel
7019 *
7020 * This function is called only once at driver load to initialize basic
7021 * DRRS stuff.
7022 *
7023 * Returns:
7024 * Downclock mode if panel supports it, else return NULL.
7025 * DRRS support is determined by the presence of downclock mode (apart
7026 * from VBT setting).
7027 */
4f9db5b5 7028static struct drm_display_mode *
2f773477
VS
7029intel_dp_drrs_init(struct intel_connector *connector,
7030 struct drm_display_mode *fixed_mode)
4f9db5b5 7031{
2f773477 7032 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
4f9db5b5
PB
7033 struct drm_display_mode *downclock_mode = NULL;
7034
9da7d693
DV
7035 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7036 mutex_init(&dev_priv->drrs.mutex);
7037
dd11bc10 7038 if (INTEL_GEN(dev_priv) <= 6) {
4f9db5b5
PB
7039 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
7040 return NULL;
7041 }
7042
7043 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 7044 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
7045 return NULL;
7046 }
7047
abf1aae8 7048 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
4f9db5b5 7049 if (!downclock_mode) {
a1d26342 7050 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
7051 return NULL;
7052 }
7053
96178eeb 7054 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 7055
96178eeb 7056 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 7057 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
7058 return downclock_mode;
7059}
7060
/*
 * Finish connector setup for eDP: sanitize the BIOS-programmed PPS state,
 * cache the DPCD and EDID, establish the fixed (and optional downclock/DRRS)
 * panel mode, and set up the backlight. Returns false (and undoes the VDD
 * work) if the panel looks absent or LVDS already owns the power sequencer.
 * For non-eDP connectors this is a no-op returning true.
 */
ed92f0b2 7061static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 7062				     struct intel_connector *intel_connector)
ed92f0b2 7063{
de25eb7f
RV
 7064	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 7065	struct drm_device *dev = &dev_priv->drm;
2f773477 7066	struct drm_connector *connector = &intel_connector->base;
ed92f0b2 7067	struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 7068	struct drm_display_mode *downclock_mode = NULL;
ed92f0b2 7069	bool has_dpcd;
6517d273 7070	enum pipe pipe = INVALID_PIPE;
69d93820
CW
 7071	intel_wakeref_t wakeref;
 7072	struct edid *edid;
ed92f0b2 7073
 7074	if (!intel_dp_is_edp(intel_dp))
ed92f0b2
PZ
 7075		return true;
 7076
36b80aa3
JRS
 7077	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
 7078
97a824e1
ID
 7079	/*
 7080	 * On IBX/CPT we may get here with LVDS already registered. Since the
 7081	 * driver uses the only internal power sequencer available for both
 7082	 * eDP and LVDS bail out early in this case to prevent interfering
 7083	 * with an already powered-on LVDS power sequencer.
 7084	 */
 7085	if (intel_get_lvds_encoder(dev_priv)) {
97a824e1
ID
 7086		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 7087		DRM_INFO("LVDS was detected, not registering eDP\n");
 7088
 7089		return false;
 7090	}
 7091
69d93820
CW
 7092	with_pps_lock(intel_dp, wakeref) {
 7093		intel_dp_init_panel_power_timestamps(intel_dp);
 7094		intel_dp_pps_init(intel_dp);
 7095		intel_edp_panel_vdd_sanitize(intel_dp);
 7096	}
63635217 7097
 7098	/* Cache DPCD and EDID for edp. */
fe5a66f9 7099	has_dpcd = intel_edp_init_dpcd(intel_dp);
ed92f0b2 7100
fe5a66f9 7101	if (!has_dpcd) {
ed92f0b2
PZ
 7102		/* if this fails, presume the device is a ghost */
 7103		DRM_INFO("failed to retrieve link info, disabling eDP\n");
b4d06ede 7104		goto out_vdd_off;
ed92f0b2
PZ
 7105	}
 7106
060c8778 7107	mutex_lock(&dev->mode_config.mutex);
0b99836f 7108	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
 7109	if (edid) {
 7110		if (drm_add_edid_modes(connector, edid)) {
c555f023 7111			drm_connector_update_edid_property(connector,
 7112							   edid);
ed92f0b2
PZ
 7113		} else {
			/* Unparseable EDID: keep an error sentinel, not a stale pointer. */
 7114			kfree(edid);
 7115			edid = ERR_PTR(-EINVAL);
 7116		}
 7117	} else {
 7118		edid = ERR_PTR(-ENOENT);
 7119	}
 7120	intel_connector->edid = edid;
 7121
0dc927eb
VS
 7122	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
 7123	if (fixed_mode)
 7124		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
ed92f0b2
PZ
 7125
 7126	/* fallback to VBT if available for eDP */
325710d3
VS
 7127	if (!fixed_mode)
 7128		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
060c8778 7129	mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 7130
920a14b2 7131	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
01527b31
CT
 7132		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
 7133		register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
 7134
 7135		/*
 7136		 * Figure out the current pipe for the initial backlight setup.
 7137		 * If the current pipe isn't valid, try the PPS pipe, and if that
 7138		 * fails just assume pipe A.
 7139		 */
 7140		pipe = vlv_active_pipe(intel_dp);
6517d273
VS
 7141
 7142		if (pipe != PIPE_A && pipe != PIPE_B)
 7143			pipe = intel_dp->pps_pipe;
 7144
 7145		if (pipe != PIPE_A && pipe != PIPE_B)
 7146			pipe = PIPE_A;
 7147
 7148		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
 7149			      pipe_name(pipe));
01527b31
CT
 7150	}
 7151
d93fa1b4 7152	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 7153	intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 7154	intel_panel_setup_backlight(connector, pipe);
ed92f0b2 7155
9531221d
HG
 7156	if (fixed_mode)
 7157		drm_connector_init_panel_orientation_property(
 7158			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
 7159
ed92f0b2 7160	return true;
b4d06ede
ID
 7161
 7162out_vdd_off:
 7163	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
 7164	/*
 7165	 * vdd might still be enabled due to the delayed vdd off.
 7166	 * Make sure vdd is actually turned off here.
 7167	 */
69d93820
CW
 7168	with_pps_lock(intel_dp, wakeref)
 7169		edp_panel_vdd_off_sync(intel_dp);
b4d06ede
ID
 7170
 7171	return false;
ed92f0b2
PZ
 7172}
7173
9301397a
MN
/*
 * Work item run after link training failure: flag the connector's link
 * status as BAD and fire a hotplug uevent so userspace re-probes and does
 * a fresh modeset.
 */
 7174static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
 7175{
 7176	struct intel_connector *intel_connector;
 7177	struct drm_connector *connector;
 7178
 7179	intel_connector = container_of(work, typeof(*intel_connector),
 7180				       modeset_retry_work);
 7181	connector = &intel_connector->base;
 7182	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 7183		      connector->name);
 7184
 7185	/* Grab the locks before changing connector property*/
 7186	mutex_lock(&connector->dev->mode_config.mutex);
 7187	/* Set connector link status to BAD and send a Uevent to notify
 7188	 * userspace to do a modeset.
 7189	 */
97e14fbe
DV
 7190	drm_connector_set_link_status_property(connector,
 7191					       DRM_MODE_LINK_STATUS_BAD);
9301397a
MN
 7192	mutex_unlock(&connector->dev->mode_config.mutex);
 7193	/* Send Hotplug uevent so userspace can reprobe */
 7194	drm_kms_helper_hotplug_event(connector->dev);
 7195}
7196
/**
 * intel_dp_init_connector - create and wire up the DP/eDP connector
 * @intel_dig_port: digital port the connector hangs off
 * @intel_connector: pre-allocated connector to initialize
 *
 * Initializes the DRM connector for a DP capable port: picks the
 * connector type (eDP vs DP), sets up AUX, hotplug, MST (where the
 * port supports it), eDP panel state, connector properties and HDCP.
 *
 * Returns true on success, false on failure (the connector is cleaned
 * up on the failure path; the caller still owns the allocations).
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	/* A DP port with no lanes is a VBT/board misconfiguration. */
	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	/* Force link parameter recomputation on the first detect cycle. */
	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* VBT decides whether this port drives an eDP panel. */
	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* GMCH platforms don't support interlaced output on DP. */
	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP panel probing; tears down AUX and MST state on failure. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is only wired up for external DP, not eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
f0fec3f2 7313
/**
 * intel_dp_init - register a DP encoder + connector pair for a port
 * @dev_priv: i915 device
 * @output_reg: the DP port control register for this output
 * @port: the port this encoder drives
 *
 * Allocates the digital port and connector, registers the DRM encoder,
 * wires up the platform-specific enable/disable hooks (CHV / VLV /
 * g4x-style), and finishes connector setup via
 * intel_dp_init_connector().
 *
 * Returns true on success; on failure all allocations are unwound and
 * false is returned.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	/* Common encoder hooks, independent of platform generation. */
	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific modeset sequence hooks. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On CHV, port D can only use pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* NOTE(review): port A (eDP) skips infoframe setup here. */
	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

	/* Unwind in reverse order of acquisition. */
err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
0e32b39c 7400
1a4313d1 7401void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
0e32b39c 7402{
1a4313d1
VS
7403 struct intel_encoder *encoder;
7404
7405 for_each_intel_encoder(&dev_priv->drm, encoder) {
7406 struct intel_dp *intel_dp;
0e32b39c 7407
1a4313d1
VS
7408 if (encoder->type != INTEL_OUTPUT_DDI)
7409 continue;
5aa56969 7410
1a4313d1 7411 intel_dp = enc_to_intel_dp(&encoder->base);
5aa56969 7412
1a4313d1 7413 if (!intel_dp->can_mst)
0e32b39c
DA
7414 continue;
7415
1a4313d1
VS
7416 if (intel_dp->is_mst)
7417 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
0e32b39c
DA
7418 }
7419}
7420
1a4313d1 7421void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
0e32b39c 7422{
1a4313d1 7423 struct intel_encoder *encoder;
0e32b39c 7424
1a4313d1
VS
7425 for_each_intel_encoder(&dev_priv->drm, encoder) {
7426 struct intel_dp *intel_dp;
5aa56969 7427 int ret;
0e32b39c 7428
1a4313d1
VS
7429 if (encoder->type != INTEL_OUTPUT_DDI)
7430 continue;
7431
7432 intel_dp = enc_to_intel_dp(&encoder->base);
7433
7434 if (!intel_dp->can_mst)
5aa56969 7435 continue;
0e32b39c 7436
1a4313d1 7437 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
6be1cf96
LP
7438 if (ret) {
7439 intel_dp->is_mst = false;
7440 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7441 false);
7442 }
0e32b39c
DA
7443 }
7444}