/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which requires more link rates than the above.
 * Below we only provide the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires fractional division to be programmed for m2.
	 * m2 is stored in fixed point format using the formula
	 * (m2_int << 22) | m2_fraction.
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
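/*
 * Example: decoding the fixed point m2 value of the 162000 kHz entry
 * above: 0x819999a == (32 << 22) | 1677722, i.e. m2_int = 32 and
 * m2_fraction = 1677722, for an effective m2 of 32 + 1677722/2^22
 * (roughly 32.4).
 */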
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * here for the channel encoding that is done in the PHY layer.
	 */

	return max_link_clock * max_lanes;
}
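/*
 * Example of the two helpers above: a 1920x1080@60 mode with a 148500 kHz
 * pixel clock at 24 bpp needs intel_dp_link_required(148500, 24) =
 * DIV_ROUND_UP(148500 * 24, 8) = 445500, while a 2 lane HBR2 link
 * (540000 kHz symbol clock) provides intel_dp_max_data_rate(540000, 2) =
 * 1080000 in the same units, so the mode fits comfortably.
 */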
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to a max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported on all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, the max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
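/*
 * Example: with source rates { 162000, 270000, 540000 } and sink rates
 * { 162000, 270000, 540000, 810000 }, the merge above walks both sorted
 * arrays once and yields common rates { 162000, 270000, 540000 }.
 */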
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
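/*
 * Example: FEC adds roughly 2.4% of overhead, hence the 1/0.972261
 * factor above. A 148500 kHz mode clock becomes
 * 148500 * 1000000 / 972261, roughly 152737 kHz of effective link
 * occupancy once FEC is enabled.
 */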
static u32
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
			      bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format so multiply by 16; for Gen 11,
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}
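/*
 * Note on the U6.4 return format above: a computed 12 bpp is returned
 * as 12 << 4 = 192; callers such as intel_dp_dsc_compute_config() shift
 * right by 4 to recover the integer bpp, since the fractional part is
 * always 0 on Gen 11.
 */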
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
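/*
 * Example: a 600000 KPixels/s mode clock is below DP_DSC_PEAK_PIXEL_RATE,
 * so min_slice_count = DIV_ROUND_UP(600000, 340000) = 2, which the loop
 * above then rounds up to the nearest supported value in
 * valid_dsc_slicecount[].
 */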
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
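/*
 * Example: the AUX data registers hold bytes in big endian order, so
 * intel_dp_pack_aux() turns the bytes 0x11, 0x22, 0x33, 0x44 into the
 * register value 0x11223344, and intel_dp_unpack_aux() reverses the
 * transformation.
 */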
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
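/*
 * Intended usage of the macro above (since pps_unlock() returns 0, the
 * loop body runs exactly once):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... code needing pps_mutex and the power reference ...
 *	}
 */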
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		 pipe_name(pipe), intel_dig_port->base.base.base.id,
		 intel_dig_port->base.base.name))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		      pipe_name(pipe), intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
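/*
 * Example: if only PIPE_A's power sequencer is taken, pipes above is
 * 0b10, so ffs(pipes) - 1 == 1 == PIPE_B is returned.
 */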
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		      pipe_name(intel_dp->pps_pipe),
		      intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
			      intel_dig_port->base.base.base.id,
			      intel_dig_port->base.base.name);
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		      intel_dig_port->base.base.base.id,
		      intel_dig_port->base.base.name,
		      pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
/*
 * Reboot notifier handler to shut down panel power, guaranteeing the T12
 * timing. This function is only applicable when the panel PM state is not
 * to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and we would like the
	 * AUX engine to run at 2MHz. So take the hrawclk value, divide by
	 * 2000, and use that.
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
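/*
 * Example (hypothetical rawclk value): with a 24000 kHz rawclk the
 * divider computed above is DIV_ROUND_CLOSEST(24000, 2000) = 12,
 * i.e. a 2 MHz AUX bit clock.
 */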
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and we
	 * would like the AUX engine to run at 2MHz. So take the cdclk or
	 * PCH rawclk value, divide by 2000, and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
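/*
 * Example: a native AUX read (request 0x9) of 16 bytes from DPCD
 * address 0x00000 yields the header { 0x90, 0x00, 0x00, 0x0f }: the
 * request in the high nibble of byte 0, the 20-bit address spread
 * across bytes 0-2, and size - 1 in byte 3.
 */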
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}
static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, DSC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (INTEL_GEN(dev_priv) >= 10 &&
	    pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
		return false;

	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3 * bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value is assumed to be for RGB format. For YCbCr 4:2:0
	 * output the number of bytes per pixel is half that of an RGB
	 * pixel.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
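/*
 * Example of the "slow and wide" walk above: with common rates
 * { 162000, 270000, 540000 }, max bpp 24 and a mode needing 445500
 * (1080p at 24 bpp), the first configuration that fits is 162000 kHz x
 * 4 lanes (648000 available), so a lower clock with more lanes wins
 * over 540000 kHz x 1 lane.
 */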
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}
2130 intel_dp_compute_link_config(struct intel_encoder *encoder,
2131 struct intel_crtc_state *pipe_config,
2132 struct drm_connector_state *conn_state)
2134 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2135 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2136 struct link_config_limits limits;
2140 common_len = intel_dp_common_len_rate_limit(intel_dp,
2141 intel_dp->max_link_rate);
2143 /* No common link rates between source and sink */
2144 WARN_ON(common_len <= 0);
2146 limits.min_clock = 0;
2147 limits.max_clock = common_len - 1;
2149 limits.min_lane_count = 1;
2150 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2152 limits.min_bpp = intel_dp_min_bpp(pipe_config);
2153 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2155 if (intel_dp_is_edp(intel_dp)) {
2157 * Use the maximum clock and number of lanes the eDP panel
2158 * advertizes being capable of. The panels are generally
2159 * designed to support only a single clock and lane
2160 * configuration, and typically these values correspond to the
2161 * native resolution of the panel.
2163 limits.min_lane_count = limits.max_lane_count;
2164 limits.min_clock = limits.max_clock;
2167 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2169 DRM_DEBUG_KMS("DP link computation with max lane count %i "
2170 "max rate %d max bpp %d pixel clock %iKHz\n",
2171 limits.max_lane_count,
2172 intel_dp->common_rates[limits.max_clock],
2173 limits.max_bpp, adjusted_mode->crtc_clock);
2175 /*
2176 * Optimize for slow and wide. This is the place to add alternative
2177 * optimization policy.
2178 */
2179 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2181 /* enable compression if the mode doesn't fit available BW */
2182 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2183 if (ret || intel_dp->force_dsc_en) {
2184 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2185 conn_state, &limits);
2186 if (ret < 0)
2187 return ret;
2188 }
2190 if (pipe_config->dsc_params.compression_enable) {
2191 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2192 pipe_config->lane_count, pipe_config->port_clock,
2193 pipe_config->pipe_bpp,
2194 pipe_config->dsc_params.compressed_bpp);
2196 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2197 intel_dp_link_required(adjusted_mode->crtc_clock,
2198 pipe_config->dsc_params.compressed_bpp),
2199 intel_dp_max_data_rate(pipe_config->port_clock,
2200 pipe_config->lane_count));
2202 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2203 pipe_config->lane_count, pipe_config->port_clock,
2204 pipe_config->pipe_bpp);
2206 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2207 intel_dp_link_required(adjusted_mode->crtc_clock,
2208 pipe_config->pipe_bpp),
2209 intel_dp_max_data_rate(pipe_config->port_clock,
2210 pipe_config->lane_count));
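/*
 * Editor's sketch of the check being logged above. port_clock is the link
 * symbol clock in kHz and each symbol carries 8 data bits per lane, so
 * (in kilobytes/s):
 *
 *	link_required  = pixel_clock * bpp / 8
 *	link_available = port_clock * lane_count
 *
 * e.g. 1920x1080@60 (148500 kHz) at 24 bpp requires 445500, while four
 * lanes of HBR2 (port_clock 270000) provide 1080000, so the mode fits.
 */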
2216 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2217 struct drm_connector *connector,
2218 struct intel_crtc_state *crtc_state)
2220 const struct drm_display_info *info = &connector->display_info;
2221 const struct drm_display_mode *adjusted_mode =
2222 &crtc_state->base.adjusted_mode;
2223 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2226 if (!drm_mode_is_420_only(info, adjusted_mode) ||
2227 !intel_dp_get_colorimetry_status(intel_dp) ||
2228 !connector->ycbcr_420_allowed)
2231 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2233 /* YCBCR 420 output conversion needs a scaler */
2234 ret = skl_update_scaler_crtc(crtc_state);
2236 DRM_DEBUG_KMS("Scaler allocation for output failed\n");
2240 intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
2245 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2246 const struct drm_connector_state *conn_state)
2248 const struct intel_digital_connector_state *intel_conn_state =
2249 to_intel_digital_connector_state(conn_state);
2250 const struct drm_display_mode *adjusted_mode =
2251 &crtc_state->base.adjusted_mode;
2253 /*
2254 * Our YCbCr output is always limited range.
2255 * crtc_state->limited_color_range only applies to RGB,
2256 * and it must never be set for YCbCr or we risk setting
2257 * some conflicting bits in PIPECONF which will mess up
2258 * the colors on the monitor.
2259 */
2260 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2263 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2264 /*
2265 * See:
2266 * CEA-861-E - 5.1 Default Encoding Parameters
2267 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2268 */
2269 return crtc_state->pipe_bpp != 18 &&
2270 drm_default_rgb_quant_range(adjusted_mode) ==
2271 HDMI_QUANTIZATION_RANGE_LIMITED;
2273 return intel_conn_state->broadcast_rgb ==
2274 INTEL_BROADCAST_RGB_LIMITED;
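/*
 * Editor's note (example): with broadcast_rgb left at AUTO, a CTA-861 mode
 * such as 1920x1080@60 defaults to limited range RGB at 24 bpp, while
 * 640x480 (and any 18 bpp output, per the check above) stays full range.
 */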
2279 intel_dp_compute_config(struct intel_encoder *encoder,
2280 struct intel_crtc_state *pipe_config,
2281 struct drm_connector_state *conn_state)
2283 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2284 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2285 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2286 struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
2287 enum port port = encoder->port;
2288 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
2289 struct intel_connector *intel_connector = intel_dp->attached_connector;
2290 struct intel_digital_connector_state *intel_conn_state =
2291 to_intel_digital_connector_state(conn_state);
2292 bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2293 DP_DPCD_QUIRK_CONSTANT_N);
2294 int ret = 0, output_bpp;
2296 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2297 pipe_config->has_pch_encoder = true;
2299 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2301 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2303 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2304 pipe_config);
2305 if (ret)
2306 return ret;
2309 pipe_config->has_drrs = false;
2310 if (IS_G4X(dev_priv) || port == PORT_A)
2311 pipe_config->has_audio = false;
2312 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2313 pipe_config->has_audio = intel_dp->has_audio;
2315 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2317 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2318 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2319 adjusted_mode);
2321 if (INTEL_GEN(dev_priv) >= 9) {
2322 ret = skl_update_scaler_crtc(pipe_config);
2323 if (ret)
2324 return ret;
2325 }
2327 if (HAS_GMCH(dev_priv))
2328 intel_gmch_panel_fitting(intel_crtc, pipe_config,
2329 conn_state->scaling_mode);
2331 intel_pch_panel_fitting(intel_crtc, pipe_config,
2332 conn_state->scaling_mode);
2335 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2336 return -EINVAL;
2337
2338 if (HAS_GMCH(dev_priv) &&
2339 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2340 return -EINVAL;
2341
2342 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2343 return -EINVAL;
2344
2345 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2346 if (ret < 0)
2347 return ret;
2349 pipe_config->limited_color_range =
2350 intel_dp_limited_color_range(pipe_config, conn_state);
2352 if (pipe_config->dsc_params.compression_enable)
2353 output_bpp = pipe_config->dsc_params.compressed_bpp;
2355 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2357 intel_link_compute_m_n(output_bpp,
2358 pipe_config->lane_count,
2359 adjusted_mode->crtc_clock,
2360 pipe_config->port_clock,
2361 &pipe_config->dp_m_n,
2362 constant_n, pipe_config->fec_enable);
2364 if (intel_connector->panel.downclock_mode != NULL &&
2365 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2366 pipe_config->has_drrs = true;
2367 intel_link_compute_m_n(output_bpp,
2368 pipe_config->lane_count,
2369 intel_connector->panel.downclock_mode->clock,
2370 pipe_config->port_clock,
2371 &pipe_config->dp_m2_n2,
2372 constant_n, pipe_config->fec_enable);
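/*
 * Editor's sketch: intel_link_compute_m_n() encodes ratios along the
 * lines of
 *
 *	data M/N = (output_bpp * pixel_clock) / (lane_count * port_clock * 8)
 *	link M/N = pixel_clock / port_clock
 *
 * e.g. 24 bpp at 148500 kHz over four lanes of HBR2 (270000 kHz) gives a
 * data ratio of (24 * 148500) / (4 * 270000 * 8) = 0.4125.
 */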
2375 if (!HAS_DDI(dev_priv))
2376 intel_dp_set_clock(encoder, pipe_config);
2378 intel_psr_compute_config(intel_dp, pipe_config);
2380 intel_hdcp_transcoder_config(intel_connector,
2381 pipe_config->cpu_transcoder);
2386 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2387 int link_rate, u8 lane_count,
2390 intel_dp->link_trained = false;
2391 intel_dp->link_rate = link_rate;
2392 intel_dp->lane_count = lane_count;
2393 intel_dp->link_mst = link_mst;
2396 static void intel_dp_prepare(struct intel_encoder *encoder,
2397 const struct intel_crtc_state *pipe_config)
2399 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2400 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2401 enum port port = encoder->port;
2402 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2403 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2405 intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2406 pipe_config->lane_count,
2407 intel_crtc_has_type(pipe_config,
2408 INTEL_OUTPUT_DP_MST));
2410 intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
2411 intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
2414 * There are four kinds of DP registers:
2415 *
2416 *	IBX PCH
2417 *	SNB CPU
2418 *	IVB CPU
2419 *	CPT PCH
2420 *
2421 * IBX PCH and CPU are the same for almost everything,
2422 * except that the CPU DP PLL is configured in this
2423 * register
2424 *
2425 * CPT PCH is quite different, having many bits moved
2426 * to the TRANS_DP_CTL register instead. That
2427 * configuration happens (oddly) in ironlake_pch_enable
2428 */
2430 /* Preserve the BIOS-computed detected bit. This is
2431 * supposed to be read-only.
2432 */
2433 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2435 /* Handle DP bits in common between all three register formats */
2436 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2437 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2439 /* Split out the IBX/CPU vs CPT settings */
2441 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2442 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2443 intel_dp->DP |= DP_SYNC_HS_HIGH;
2444 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2445 intel_dp->DP |= DP_SYNC_VS_HIGH;
2446 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2448 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2449 intel_dp->DP |= DP_ENHANCED_FRAMING;
2451 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2452 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2455 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2457 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2458 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2459 trans_dp |= TRANS_DP_ENH_FRAMING;
2461 trans_dp &= ~TRANS_DP_ENH_FRAMING;
2462 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2464 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2465 intel_dp->DP |= DP_COLOR_RANGE_16_235;
2467 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2468 intel_dp->DP |= DP_SYNC_HS_HIGH;
2469 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2470 intel_dp->DP |= DP_SYNC_VS_HIGH;
2471 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2473 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2474 intel_dp->DP |= DP_ENHANCED_FRAMING;
2476 if (IS_CHERRYVIEW(dev_priv))
2477 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2479 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2483 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
2484 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
2486 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
2487 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
2489 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2490 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
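/*
 * Editor's sketch (hypothetical helper, not part of the driver): each
 * MASK/VALUE pair above is consumed as a masked equality test against the
 * PP_STATUS register, which wait_panel_status() below polls until it
 * holds.
 */
static inline bool example_pp_status_matches(u32 status, u32 mask, u32 value)
{
	return (status & mask) == value;
}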
2492 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2494 static void wait_panel_status(struct intel_dp *intel_dp,
2495 u32 mask,
2496 u32 value)
2497 {
2498 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2499 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2501 lockdep_assert_held(&dev_priv->pps_mutex);
2503 intel_pps_verify_state(intel_dp);
2505 pp_stat_reg = _pp_stat_reg(intel_dp);
2506 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2508 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
2510 I915_READ(pp_stat_reg),
2511 I915_READ(pp_ctrl_reg));
2513 if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
2514 mask, value, 5000))
2515 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
2516 I915_READ(pp_stat_reg),
2517 I915_READ(pp_ctrl_reg));
2519 DRM_DEBUG_KMS("Wait complete\n");
2522 static void wait_panel_on(struct intel_dp *intel_dp)
2524 DRM_DEBUG_KMS("Wait for panel power on\n");
2525 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2528 static void wait_panel_off(struct intel_dp *intel_dp)
2530 DRM_DEBUG_KMS("Wait for panel power off time\n");
2531 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2534 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2536 ktime_t panel_power_on_time;
2537 s64 panel_power_off_duration;
2539 DRM_DEBUG_KMS("Wait for panel power cycle\n");
2541 /* Take the difference of the current time and the panel power off time,
2542 * then make the panel wait out t11_t12 if needed. */
2543 panel_power_on_time = ktime_get_boottime();
2544 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2546 /* When we disable the VDD override bit last we have to do the manual
2547 * wait. */
2548 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2549 wait_remaining_ms_from_jiffies(jiffies,
2550 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2552 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
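/*
 * Editor's note: the net effect above is to wait out only the part of
 * T11/T12 not already covered by the time spent powered off, i.e.
 * max(panel_power_cycle_delay - panel_power_off_duration, 0) ms.
 */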
2555 static void wait_backlight_on(struct intel_dp *intel_dp)
2557 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2558 intel_dp->backlight_on_delay);
2561 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2563 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2564 intel_dp->backlight_off_delay);
2567 /* Read the current pp_control value, unlocking the register if it
2568 * is locked
2569 */
2571 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2573 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2576 lockdep_assert_held(&dev_priv->pps_mutex);
2578 control = I915_READ(_pp_ctrl_reg(intel_dp));
2579 if (WARN_ON(!HAS_DDI(dev_priv) &&
2580 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2581 control &= ~PANEL_UNLOCK_MASK;
2582 control |= PANEL_UNLOCK_REGS;
2583 }
2584 return control;
2585 }
2587 /*
2588 * Must be paired with edp_panel_vdd_off().
2589 * Must hold pps_mutex around the whole on/off sequence.
2590 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2591 */
2592 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2594 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2595 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2597 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2598 bool need_to_disable = !intel_dp->want_panel_vdd;
2600 lockdep_assert_held(&dev_priv->pps_mutex);
2602 if (!intel_dp_is_edp(intel_dp))
2605 cancel_delayed_work(&intel_dp->panel_vdd_work);
2606 intel_dp->want_panel_vdd = true;
2608 if (edp_have_panel_vdd(intel_dp))
2609 return need_to_disable;
2611 intel_display_power_get(dev_priv,
2612 intel_aux_power_domain(intel_dig_port));
2614 DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
2615 intel_dig_port->base.base.base.id,
2616 intel_dig_port->base.base.name);
2618 if (!edp_have_panel_power(intel_dp))
2619 wait_panel_power_cycle(intel_dp);
2621 pp = ironlake_get_pp_control(intel_dp);
2622 pp |= EDP_FORCE_VDD;
2624 pp_stat_reg = _pp_stat_reg(intel_dp);
2625 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2627 I915_WRITE(pp_ctrl_reg, pp);
2628 POSTING_READ(pp_ctrl_reg);
2629 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2630 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2631 /*
2632 * If the panel wasn't on, delay before accessing aux channel
2633 */
2634 if (!edp_have_panel_power(intel_dp)) {
2635 DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
2636 intel_dig_port->base.base.base.id,
2637 intel_dig_port->base.base.name);
2638 msleep(intel_dp->panel_power_up_delay);
2641 return need_to_disable;
2644 /*
2645 * Must be paired with intel_edp_panel_vdd_off() or
2646 * intel_edp_panel_off().
2647 * Nested calls to these functions are not allowed since
2648 * we drop the lock. Caller must use some higher level
2649 * locking to prevent nested calls from other threads.
2650 */
2651 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2653 intel_wakeref_t wakeref;
2656 if (!intel_dp_is_edp(intel_dp))
2660 with_pps_lock(intel_dp, wakeref)
2661 vdd = edp_panel_vdd_on(intel_dp);
2662 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
2663 dp_to_dig_port(intel_dp)->base.base.base.id,
2664 dp_to_dig_port(intel_dp)->base.base.name);
2667 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2669 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2670 struct intel_digital_port *intel_dig_port =
2671 dp_to_dig_port(intel_dp);
2673 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2675 lockdep_assert_held(&dev_priv->pps_mutex);
2677 WARN_ON(intel_dp->want_panel_vdd);
2679 if (!edp_have_panel_vdd(intel_dp))
2682 DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
2683 intel_dig_port->base.base.base.id,
2684 intel_dig_port->base.base.name);
2686 pp = ironlake_get_pp_control(intel_dp);
2687 pp &= ~EDP_FORCE_VDD;
2689 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2690 pp_stat_reg = _pp_stat_reg(intel_dp);
2692 I915_WRITE(pp_ctrl_reg, pp);
2693 POSTING_READ(pp_ctrl_reg);
2695 /* Make sure sequencer is idle before allowing subsequent activity */
2696 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2697 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2699 if ((pp & PANEL_POWER_ON) == 0)
2700 intel_dp->panel_power_off_time = ktime_get_boottime();
2702 intel_display_power_put_unchecked(dev_priv,
2703 intel_aux_power_domain(intel_dig_port));
2706 static void edp_panel_vdd_work(struct work_struct *__work)
2708 struct intel_dp *intel_dp =
2709 container_of(to_delayed_work(__work),
2710 struct intel_dp, panel_vdd_work);
2711 intel_wakeref_t wakeref;
2713 with_pps_lock(intel_dp, wakeref) {
2714 if (!intel_dp->want_panel_vdd)
2715 edp_panel_vdd_off_sync(intel_dp);
2719 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2721 unsigned long delay;
2723 /*
2724 * Queue the timer to fire a long time from now (relative to the power
2725 * down delay) to keep the panel power up across a sequence of
2726 * operations.
2727 */
2728 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2729 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
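/*
 * Editor's note (example, typical values assumed): with a common 500 ms
 * panel_power_cycle_delay the VDD-off work lands roughly 2500 ms out, so
 * a burst of back-to-back AUX transactions keeps reusing the
 * already-enabled VDD.
 */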
2732 /*
2733 * Must be paired with edp_panel_vdd_on().
2734 * Must hold pps_mutex around the whole on/off sequence.
2735 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2736 */
2737 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2739 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2741 lockdep_assert_held(&dev_priv->pps_mutex);
2743 if (!intel_dp_is_edp(intel_dp))
2746 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
2747 dp_to_dig_port(intel_dp)->base.base.base.id,
2748 dp_to_dig_port(intel_dp)->base.base.name);
2750 intel_dp->want_panel_vdd = false;
2753 edp_panel_vdd_off_sync(intel_dp);
2755 edp_panel_vdd_schedule_off(intel_dp);
2758 static void edp_panel_on(struct intel_dp *intel_dp)
2760 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2762 i915_reg_t pp_ctrl_reg;
2764 lockdep_assert_held(&dev_priv->pps_mutex);
2766 if (!intel_dp_is_edp(intel_dp))
2769 DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
2770 dp_to_dig_port(intel_dp)->base.base.base.id,
2771 dp_to_dig_port(intel_dp)->base.base.name);
2773 if (WARN(edp_have_panel_power(intel_dp),
2774 "[ENCODER:%d:%s] panel power already on\n",
2775 dp_to_dig_port(intel_dp)->base.base.base.id,
2776 dp_to_dig_port(intel_dp)->base.base.name))
2779 wait_panel_power_cycle(intel_dp);
2781 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2782 pp = ironlake_get_pp_control(intel_dp);
2783 if (IS_GEN(dev_priv, 5)) {
2784 /* ILK workaround: disable reset around power sequence */
2785 pp &= ~PANEL_POWER_RESET;
2786 I915_WRITE(pp_ctrl_reg, pp);
2787 POSTING_READ(pp_ctrl_reg);
2790 pp |= PANEL_POWER_ON;
2791 if (!IS_GEN(dev_priv, 5))
2792 pp |= PANEL_POWER_RESET;
2794 I915_WRITE(pp_ctrl_reg, pp);
2795 POSTING_READ(pp_ctrl_reg);
2797 wait_panel_on(intel_dp);
2798 intel_dp->last_power_on = jiffies;
2800 if (IS_GEN(dev_priv, 5)) {
2801 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2802 I915_WRITE(pp_ctrl_reg, pp);
2803 POSTING_READ(pp_ctrl_reg);
2807 void intel_edp_panel_on(struct intel_dp *intel_dp)
2809 intel_wakeref_t wakeref;
2811 if (!intel_dp_is_edp(intel_dp))
2814 with_pps_lock(intel_dp, wakeref)
2815 edp_panel_on(intel_dp);
2819 static void edp_panel_off(struct intel_dp *intel_dp)
2821 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2824 i915_reg_t pp_ctrl_reg;
2826 lockdep_assert_held(&dev_priv->pps_mutex);
2828 if (!intel_dp_is_edp(intel_dp))
2831 DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
2832 dig_port->base.base.base.id, dig_port->base.base.name);
2834 WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
2835 dig_port->base.base.base.id, dig_port->base.base.name);
2837 pp = ironlake_get_pp_control(intel_dp);
2838 /* We need to switch off panel power _and_ force vdd, for otherwise some
2839 * panels get very unhappy and cease to work. */
2840 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2843 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2845 intel_dp->want_panel_vdd = false;
2847 I915_WRITE(pp_ctrl_reg, pp);
2848 POSTING_READ(pp_ctrl_reg);
2850 wait_panel_off(intel_dp);
2851 intel_dp->panel_power_off_time = ktime_get_boottime();
2853 /* We got a reference when we enabled the VDD. */
2854 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2857 void intel_edp_panel_off(struct intel_dp *intel_dp)
2859 intel_wakeref_t wakeref;
2861 if (!intel_dp_is_edp(intel_dp))
2864 with_pps_lock(intel_dp, wakeref)
2865 edp_panel_off(intel_dp);
2868 /* Enable backlight in the panel power control. */
2869 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2871 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2872 intel_wakeref_t wakeref;
2875 * If we enable the backlight right away following a panel power
2876 * on, we may see slight flicker as the panel syncs with the eDP
2877 * link. So delay a bit to make sure the image is solid before
2878 * allowing it to appear.
2880 wait_backlight_on(intel_dp);
2882 with_pps_lock(intel_dp, wakeref) {
2883 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2886 pp = ironlake_get_pp_control(intel_dp);
2887 pp |= EDP_BLC_ENABLE;
2889 I915_WRITE(pp_ctrl_reg, pp);
2890 POSTING_READ(pp_ctrl_reg);
2894 /* Enable backlight PWM and backlight PP control. */
2895 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2896 const struct drm_connector_state *conn_state)
2898 struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2900 if (!intel_dp_is_edp(intel_dp))
2903 DRM_DEBUG_KMS("\n");
2905 intel_panel_enable_backlight(crtc_state, conn_state);
2906 _intel_edp_backlight_on(intel_dp);
2909 /* Disable backlight in the panel power control. */
2910 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2912 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2913 intel_wakeref_t wakeref;
2915 if (!intel_dp_is_edp(intel_dp))
2918 with_pps_lock(intel_dp, wakeref) {
2919 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2922 pp = ironlake_get_pp_control(intel_dp);
2923 pp &= ~EDP_BLC_ENABLE;
2925 I915_WRITE(pp_ctrl_reg, pp);
2926 POSTING_READ(pp_ctrl_reg);
2929 intel_dp->last_backlight_off = jiffies;
2930 edp_wait_backlight_off(intel_dp);
2933 /* Disable backlight PP control and backlight PWM. */
2934 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2936 struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2938 if (!intel_dp_is_edp(intel_dp))
2941 DRM_DEBUG_KMS("\n");
2943 _intel_edp_backlight_off(intel_dp);
2944 intel_panel_disable_backlight(old_conn_state);
2947 /*
2948 * Hook for controlling the panel power control backlight through the bl_power
2949 * sysfs attribute. Take care to handle multiple calls.
2950 */
2951 static void intel_edp_backlight_power(struct intel_connector *connector,
2954 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2955 intel_wakeref_t wakeref;
2959 with_pps_lock(intel_dp, wakeref)
2960 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2961 if (is_enabled == enable)
2964 DRM_DEBUG_KMS("panel power control backlight %s\n",
2965 enable ? "enable" : "disable");
2968 _intel_edp_backlight_on(intel_dp);
2970 _intel_edp_backlight_off(intel_dp);
2973 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2975 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2976 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2977 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2979 I915_STATE_WARN(cur_state != state,
2980 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
2981 dig_port->base.base.base.id, dig_port->base.base.name,
2982 onoff(state), onoff(cur_state));
2984 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2986 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2988 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2990 I915_STATE_WARN(cur_state != state,
2991 "eDP PLL state assertion failure (expected %s, current %s)\n",
2992 onoff(state), onoff(cur_state));
2994 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2995 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2997 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2998 const struct intel_crtc_state *pipe_config)
3000 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3001 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3003 assert_pipe_disabled(dev_priv, crtc->pipe);
3004 assert_dp_port_disabled(intel_dp);
3005 assert_edp_pll_disabled(dev_priv);
3007 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
3008 pipe_config->port_clock);
3010 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
3012 if (pipe_config->port_clock == 162000)
3013 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
3015 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
3017 I915_WRITE(DP_A, intel_dp->DP);
3021 /*
3022 * [DevILK] Work around required when enabling DP PLL
3023 * while a pipe is enabled going to FDI:
3024 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
3025 * 2. Program DP PLL enable
3026 */
3027 if (IS_GEN(dev_priv, 5))
3028 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
3030 intel_dp->DP |= DP_PLL_ENABLE;
3032 I915_WRITE(DP_A, intel_dp->DP);
3037 static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
3038 const struct intel_crtc_state *old_crtc_state)
3040 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3041 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3043 assert_pipe_disabled(dev_priv, crtc->pipe);
3044 assert_dp_port_disabled(intel_dp);
3045 assert_edp_pll_enabled(dev_priv);
3047 DRM_DEBUG_KMS("disabling eDP PLL\n");
3049 intel_dp->DP &= ~DP_PLL_ENABLE;
3051 I915_WRITE(DP_A, intel_dp->DP);
3056 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3057 {
3058 /*
3059 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3060 * be capable of signalling downstream hpd with a long pulse.
3061 * Whether or not that means D3 is safe to use is not clear,
3062 * but let's assume so until proven otherwise.
3063 *
3064 * FIXME should really check all downstream ports...
3065 */
3066 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3067 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
3068 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
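/*
 * Editor's example: a DPCD 1.1 branch device with a downstream port that
 * can signal HPD satisfies all three checks, e.g. dpcd[DP_DPCD_REV] ==
 * 0x11, DP_DWN_STRM_PORT_PRESENT set, and downstream_ports[0] carrying
 * DP_DS_PORT_HPD.
 */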
3071 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3072 const struct intel_crtc_state *crtc_state,
3077 if (!crtc_state->dsc_params.compression_enable)
3080 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3081 enable ? DP_DECOMPRESSION_EN : 0);
3083 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
3084 enable ? "enable" : "disable");
3087 /* If the sink supports it, try to set the power state appropriately */
3088 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
3092 /* Should have a valid DPCD by this point */
3093 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3096 if (mode != DRM_MODE_DPMS_ON) {
3097 if (downstream_hpd_needs_d0(intel_dp))
3098 return;
3099
3100 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3101 DP_SET_POWER_D3);
3102 } else {
3103 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
3104 int i;
3105 /*
3106 * When turning on, we need to retry for 1ms to give the sink
3107 * time to wake up.
3108 */
3109 for (i = 0; i < 3; i++) {
3110 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3111 DP_SET_POWER_D0);
3112 if (ret == 1)
3113 break;
3114 msleep(1);
3115 }
3117 if (ret == 1 && lspcon->active)
3118 lspcon_wait_pcon_mode(lspcon);
3122 DRM_DEBUG_KMS("failed to %s sink power state\n",
3123 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3126 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3127 enum port port, enum pipe *pipe)
3131 for_each_pipe(dev_priv, p) {
3132 u32 val = I915_READ(TRANS_DP_CTL(p));
3134 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3140 DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3142 /* must initialize pipe to something for the asserts */
3148 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3149 i915_reg_t dp_reg, enum port port,
3155 val = I915_READ(dp_reg);
3157 ret = val & DP_PORT_EN;
3159 /* asserts want to know the pipe even if the port is disabled */
3160 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3161 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3162 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3163 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3164 else if (IS_CHERRYVIEW(dev_priv))
3165 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3167 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3172 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3175 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3176 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3177 intel_wakeref_t wakeref;
3180 wakeref = intel_display_power_get_if_enabled(dev_priv,
3181 encoder->power_domain);
3185 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3186 encoder->port, pipe);
3188 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3193 static void intel_dp_get_config(struct intel_encoder *encoder,
3194 struct intel_crtc_state *pipe_config)
3196 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3197 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3199 enum port port = encoder->port;
3200 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3202 if (encoder->type == INTEL_OUTPUT_EDP)
3203 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3205 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3207 tmp = I915_READ(intel_dp->output_reg);
3209 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
3211 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3212 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
3214 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3215 flags |= DRM_MODE_FLAG_PHSYNC;
3217 flags |= DRM_MODE_FLAG_NHSYNC;
3219 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3220 flags |= DRM_MODE_FLAG_PVSYNC;
3222 flags |= DRM_MODE_FLAG_NVSYNC;
3224 if (tmp & DP_SYNC_HS_HIGH)
3225 flags |= DRM_MODE_FLAG_PHSYNC;
3227 flags |= DRM_MODE_FLAG_NHSYNC;
3229 if (tmp & DP_SYNC_VS_HIGH)
3230 flags |= DRM_MODE_FLAG_PVSYNC;
3232 flags |= DRM_MODE_FLAG_NVSYNC;
3235 pipe_config->base.adjusted_mode.flags |= flags;
3237 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3238 pipe_config->limited_color_range = true;
3240 pipe_config->lane_count =
3241 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3243 intel_dp_get_m_n(crtc, pipe_config);
3245 if (port == PORT_A) {
3246 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3247 pipe_config->port_clock = 162000;
3249 pipe_config->port_clock = 270000;
3252 pipe_config->base.adjusted_mode.crtc_clock =
3253 intel_dotclock_calculate(pipe_config->port_clock,
3254 &pipe_config->dp_m_n);
3256 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3257 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3258 /*
3259 * This is a big fat ugly hack.
3260 *
3261 * Some machines in UEFI boot mode provide us a VBT that has 18
3262 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3263 * unknown we fail to light up. Yet the same BIOS boots up with
3264 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3265 * max, not what it tells us to use.
3266 *
3267 * Note: This will still be broken if the eDP panel is not lit
3268 * up by the BIOS, and thus we can't get the mode at module
3269 * load.
3270 */
3271 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3272 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3273 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3277 static void intel_disable_dp(struct intel_encoder *encoder,
3278 const struct intel_crtc_state *old_crtc_state,
3279 const struct drm_connector_state *old_conn_state)
3281 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3283 intel_dp->link_trained = false;
3285 if (old_crtc_state->has_audio)
3286 intel_audio_codec_disable(encoder,
3287 old_crtc_state, old_conn_state);
3289 /* Make sure the panel is off before trying to change the mode. But also
3290 * ensure that we have vdd while we switch off the panel. */
3291 intel_edp_panel_vdd_on(intel_dp);
3292 intel_edp_backlight_off(old_conn_state);
3293 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3294 intel_edp_panel_off(intel_dp);
3297 static void g4x_disable_dp(struct intel_encoder *encoder,
3298 const struct intel_crtc_state *old_crtc_state,
3299 const struct drm_connector_state *old_conn_state)
3301 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3304 static void vlv_disable_dp(struct intel_encoder *encoder,
3305 const struct intel_crtc_state *old_crtc_state,
3306 const struct drm_connector_state *old_conn_state)
3308 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3311 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3312 const struct intel_crtc_state *old_crtc_state,
3313 const struct drm_connector_state *old_conn_state)
3315 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3316 enum port port = encoder->port;
3318 /*
3319 * Bspec does not list a specific disable sequence for g4x DP.
3320 * Follow the ilk+ sequence (disable pipe before the port) for
3321 * g4x DP as it does not suffer from underruns like the normal
3322 * g4x modeset sequence (disable pipe after the port).
3323 */
3324 intel_dp_link_down(encoder, old_crtc_state);
3326 /* Only ilk+ has port A */
3328 ironlake_edp_pll_off(intel_dp, old_crtc_state);
3331 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3332 const struct intel_crtc_state *old_crtc_state,
3333 const struct drm_connector_state *old_conn_state)
3335 intel_dp_link_down(encoder, old_crtc_state);
3338 static void chv_post_disable_dp(struct intel_encoder *encoder,
3339 const struct intel_crtc_state *old_crtc_state,
3340 const struct drm_connector_state *old_conn_state)
3342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3344 intel_dp_link_down(encoder, old_crtc_state);
3346 vlv_dpio_get(dev_priv);
3348 /* Assert data lane reset */
3349 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3351 vlv_dpio_put(dev_priv);
3355 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3359 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3360 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3361 enum port port = intel_dig_port->base.port;
3362 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3364 if (dp_train_pat & train_pat_mask)
3365 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
3366 dp_train_pat & train_pat_mask);
3368 if (HAS_DDI(dev_priv)) {
3369 u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
3371 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3372 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3374 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3376 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3377 switch (dp_train_pat & train_pat_mask) {
3378 case DP_TRAINING_PATTERN_DISABLE:
3379 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3382 case DP_TRAINING_PATTERN_1:
3383 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3385 case DP_TRAINING_PATTERN_2:
3386 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3388 case DP_TRAINING_PATTERN_3:
3389 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3391 case DP_TRAINING_PATTERN_4:
3392 temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3395 I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
3397 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3398 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3399 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3402 case DP_TRAINING_PATTERN_DISABLE:
3403 *DP |= DP_LINK_TRAIN_OFF_CPT;
3405 case DP_TRAINING_PATTERN_1:
3406 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3408 case DP_TRAINING_PATTERN_2:
3409 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3411 case DP_TRAINING_PATTERN_3:
3412 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3413 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3418 *DP &= ~DP_LINK_TRAIN_MASK;
3420 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3421 case DP_TRAINING_PATTERN_DISABLE:
3422 *DP |= DP_LINK_TRAIN_OFF;
3424 case DP_TRAINING_PATTERN_1:
3425 *DP |= DP_LINK_TRAIN_PAT_1;
3427 case DP_TRAINING_PATTERN_2:
3428 *DP |= DP_LINK_TRAIN_PAT_2;
3430 case DP_TRAINING_PATTERN_3:
3431 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
3432 *DP |= DP_LINK_TRAIN_PAT_2;
3438 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3439 const struct intel_crtc_state *old_crtc_state)
3441 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3443 /* enable with pattern 1 (as per spec) */
3445 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3447 /*
3448 * Magic for VLV/CHV. We _must_ first set up the register
3449 * without actually enabling the port, and then do another
3450 * write to enable the port. Otherwise link training will
3451 * fail when the power sequencer is freshly used for this port.
3452 */
3453 intel_dp->DP |= DP_PORT_EN;
3454 if (old_crtc_state->has_audio)
3455 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3457 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3458 POSTING_READ(intel_dp->output_reg);
3461 static void intel_enable_dp(struct intel_encoder *encoder,
3462 const struct intel_crtc_state *pipe_config,
3463 const struct drm_connector_state *conn_state)
3465 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3466 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3467 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3468 u32 dp_reg = I915_READ(intel_dp->output_reg);
3469 enum pipe pipe = crtc->pipe;
3470 intel_wakeref_t wakeref;
3472 if (WARN_ON(dp_reg & DP_PORT_EN))
3475 with_pps_lock(intel_dp, wakeref) {
3476 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3477 vlv_init_panel_power_sequencer(encoder, pipe_config);
3479 intel_dp_enable_port(intel_dp, pipe_config);
3481 edp_panel_vdd_on(intel_dp);
3482 edp_panel_on(intel_dp);
3483 edp_panel_vdd_off(intel_dp, true);
3486 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3487 unsigned int lane_mask = 0x0;
3489 if (IS_CHERRYVIEW(dev_priv))
3490 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3492 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3496 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3497 intel_dp_start_link_train(intel_dp);
3498 intel_dp_stop_link_train(intel_dp);
3500 if (pipe_config->has_audio) {
3501 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3503 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3507 static void g4x_enable_dp(struct intel_encoder *encoder,
3508 const struct intel_crtc_state *pipe_config,
3509 const struct drm_connector_state *conn_state)
3511 intel_enable_dp(encoder, pipe_config, conn_state);
3512 intel_edp_backlight_on(pipe_config, conn_state);
3515 static void vlv_enable_dp(struct intel_encoder *encoder,
3516 const struct intel_crtc_state *pipe_config,
3517 const struct drm_connector_state *conn_state)
3519 intel_edp_backlight_on(pipe_config, conn_state);
3522 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3523 const struct intel_crtc_state *pipe_config,
3524 const struct drm_connector_state *conn_state)
3526 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3527 enum port port = encoder->port;
3529 intel_dp_prepare(encoder, pipe_config);
3531 /* Only ilk+ has port A */
3533 ironlake_edp_pll_on(intel_dp, pipe_config);
3536 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3538 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3539 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3540 enum pipe pipe = intel_dp->pps_pipe;
3541 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3543 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3545 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
3548 edp_panel_vdd_off_sync(intel_dp);
3550 /*
3551 * VLV seems to get confused when multiple power sequencers
3552 * have the same port selected (even if only one has power/vdd
3553 * enabled). The failure manifests as vlv_wait_port_ready() failing.
3554 * CHV, on the other hand, doesn't seem to mind having the same port
3555 * selected in multiple power sequencers, but let's clear the
3556 * port select always when logically disconnecting a power sequencer
3557 * from a port.
3558 */
3559 DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
3560 pipe_name(pipe), intel_dig_port->base.base.base.id,
3561 intel_dig_port->base.base.name);
3562 I915_WRITE(pp_on_reg, 0);
3563 POSTING_READ(pp_on_reg);
3565 intel_dp->pps_pipe = INVALID_PIPE;
3568 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3571 struct intel_encoder *encoder;
3573 lockdep_assert_held(&dev_priv->pps_mutex);
3575 for_each_intel_dp(&dev_priv->drm, encoder) {
3576 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3578 WARN(intel_dp->active_pipe == pipe,
3579 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
3580 pipe_name(pipe), encoder->base.base.id,
3581 encoder->base.name);
3583 if (intel_dp->pps_pipe != pipe)
3586 DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
3587 pipe_name(pipe), encoder->base.base.id,
3588 encoder->base.name);
3590 /* make sure vdd is off before we steal it */
3591 vlv_detach_power_sequencer(intel_dp);
3595 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3596 const struct intel_crtc_state *crtc_state)
3598 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3599 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3600 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3602 lockdep_assert_held(&dev_priv->pps_mutex);
3604 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3606 if (intel_dp->pps_pipe != INVALID_PIPE &&
3607 intel_dp->pps_pipe != crtc->pipe) {
3608 /*
3609 * If another power sequencer was being used on this
3610 * port previously make sure to turn off vdd there while
3611 * we still have control of it.
3612 */
3613 vlv_detach_power_sequencer(intel_dp);
3616 /*
3617 * We may be stealing the power
3618 * sequencer from another port.
3619 */
3620 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3622 intel_dp->active_pipe = crtc->pipe;
3624 if (!intel_dp_is_edp(intel_dp))
3627 /* now it's all ours */
3628 intel_dp->pps_pipe = crtc->pipe;
3630 DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
3631 pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
3632 encoder->base.name);
3634 /* init power sequencer on this pipe and port */
3635 intel_dp_init_panel_power_sequencer(intel_dp);
3636 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3639 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3640 const struct intel_crtc_state *pipe_config,
3641 const struct drm_connector_state *conn_state)
3643 vlv_phy_pre_encoder_enable(encoder, pipe_config);
3645 intel_enable_dp(encoder, pipe_config, conn_state);
3648 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3649 const struct intel_crtc_state *pipe_config,
3650 const struct drm_connector_state *conn_state)
3652 intel_dp_prepare(encoder, pipe_config);
3654 vlv_phy_pre_pll_enable(encoder, pipe_config);
3657 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3658 const struct intel_crtc_state *pipe_config,
3659 const struct drm_connector_state *conn_state)
3661 chv_phy_pre_encoder_enable(encoder, pipe_config);
3663 intel_enable_dp(encoder, pipe_config, conn_state);
3665 /* Second common lane will stay alive on its own now */
3666 chv_phy_release_cl2_override(encoder);
3669 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3670 const struct intel_crtc_state *pipe_config,
3671 const struct drm_connector_state *conn_state)
3673 intel_dp_prepare(encoder, pipe_config);
3675 chv_phy_pre_pll_enable(encoder, pipe_config);
3678 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3679 const struct intel_crtc_state *old_crtc_state,
3680 const struct drm_connector_state *old_conn_state)
3682 chv_phy_post_pll_disable(encoder, old_crtc_state);
3685 /*
3686 * Fetch AUX CH registers 0x202 - 0x207 which contain
3687 * link status information
3688 */
3690 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3692 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3693 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
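/*
 * Editor's sketch of a typical caller (assumes link training already ran):
 *
 *	u8 link_status[DP_LINK_STATUS_SIZE];
 *
 *	if (intel_dp_get_link_status(intel_dp, link_status) &&
 *	    drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
 *		return;	/- link still trained, nothing to do -/
 */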
3696 /* These are source-specific values. */
3698 intel_dp_voltage_max(struct intel_dp *intel_dp)
3700 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3701 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3702 enum port port = encoder->port;
3704 if (HAS_DDI(dev_priv))
3705 return intel_ddi_dp_voltage_max(encoder);
3706 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3707 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3708 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3709 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3710 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3711 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3713 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3717 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3719 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3720 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3721 enum port port = encoder->port;
3723 if (HAS_DDI(dev_priv)) {
3724 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3725 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3726 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3727 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3728 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3729 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3730 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3731 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3732 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3733 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3735 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3737 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3738 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3739 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3740 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3741 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3742 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3743 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3745 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3748 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3749 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3750 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3751 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3752 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3753 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3754 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3755 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3757 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3762 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3764 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3765 unsigned long demph_reg_value, preemph_reg_value,
3766 uniqtranscale_reg_value;
3767 u8 train_set = intel_dp->train_set[0];
3769 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3770 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3771 preemph_reg_value = 0x0004000;
3772 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3773 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3774 demph_reg_value = 0x2B405555;
3775 uniqtranscale_reg_value = 0x552AB83A;
3777 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3778 demph_reg_value = 0x2B404040;
3779 uniqtranscale_reg_value = 0x5548B83A;
3781 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3782 demph_reg_value = 0x2B245555;
3783 uniqtranscale_reg_value = 0x5560B83A;
3785 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3786 demph_reg_value = 0x2B405555;
3787 uniqtranscale_reg_value = 0x5598DA3A;
3793 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3794 preemph_reg_value = 0x0002000;
3795 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3796 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3797 demph_reg_value = 0x2B404040;
3798 uniqtranscale_reg_value = 0x5552B83A;
3800 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3801 demph_reg_value = 0x2B404848;
3802 uniqtranscale_reg_value = 0x5580B83A;
3804 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3805 demph_reg_value = 0x2B404040;
3806 uniqtranscale_reg_value = 0x55ADDA3A;
3812 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3813 preemph_reg_value = 0x0000000;
3814 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3815 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3816 demph_reg_value = 0x2B305555;
3817 uniqtranscale_reg_value = 0x5570B83A;
3819 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3820 demph_reg_value = 0x2B2B4040;
3821 uniqtranscale_reg_value = 0x55ADDA3A;
3827 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3828 preemph_reg_value = 0x0006000;
3829 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3830 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3831 demph_reg_value = 0x1B405555;
3832 uniqtranscale_reg_value = 0x55ADDA3A;
3842 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3843 uniqtranscale_reg_value, 0);
3848 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3850 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3851 u32 deemph_reg_value, margin_reg_value;
3852 bool uniq_trans_scale = false;
3853 u8 train_set = intel_dp->train_set[0];
3855 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3856 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3857 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3859 deemph_reg_value = 128;
3860 margin_reg_value = 52;
3862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3863 deemph_reg_value = 128;
3864 margin_reg_value = 77;
3866 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3867 deemph_reg_value = 128;
3868 margin_reg_value = 102;
3870 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3871 deemph_reg_value = 128;
3872 margin_reg_value = 154;
3873 uniq_trans_scale = true;
3879 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3880 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3882 deemph_reg_value = 85;
3883 margin_reg_value = 78;
3885 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3886 deemph_reg_value = 85;
3887 margin_reg_value = 116;
3889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3890 deemph_reg_value = 85;
3891 margin_reg_value = 154;
3897 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3898 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3900 deemph_reg_value = 64;
3901 margin_reg_value = 104;
3903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3904 deemph_reg_value = 64;
3905 margin_reg_value = 154;
3911 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3912 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3913 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3914 deemph_reg_value = 43;
3915 margin_reg_value = 154;
3925 chv_set_phy_signal_level(encoder, deemph_reg_value,
3926 margin_reg_value, uniq_trans_scale);
3932 g4x_signal_levels(u8 train_set)
3934 u32 signal_levels = 0;
3936 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3939 signal_levels |= DP_VOLTAGE_0_4;
3941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3942 signal_levels |= DP_VOLTAGE_0_6;
3944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3945 signal_levels |= DP_VOLTAGE_0_8;
3947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3948 signal_levels |= DP_VOLTAGE_1_2;
3951 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3952 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3954 signal_levels |= DP_PRE_EMPHASIS_0;
3956 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3957 signal_levels |= DP_PRE_EMPHASIS_3_5;
3959 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3960 signal_levels |= DP_PRE_EMPHASIS_6;
3962 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3963 signal_levels |= DP_PRE_EMPHASIS_9_5;
3966 return signal_levels;
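/*
 * Editor's example: train_set 0x0a, i.e. DP_TRAIN_VOLTAGE_SWING_LEVEL_2 |
 * DP_TRAIN_PRE_EMPH_LEVEL_1, yields DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5
 * here.
 */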
3969 /* SNB CPU eDP voltage swing and pre-emphasis control */
3971 snb_cpu_edp_signal_levels(u8 train_set)
3973 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3974 DP_TRAIN_PRE_EMPHASIS_MASK);
3975 switch (signal_levels) {
3976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3978 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3980 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3983 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3986 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3989 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3991 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3992 "0x%x\n", signal_levels);
3993 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3997 /* IVB CPU eDP voltage swing and pre-emphasis control */
3999 ivb_cpu_edp_signal_levels(u8 train_set)
4001 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4002 DP_TRAIN_PRE_EMPHASIS_MASK);
4003 switch (signal_levels) {
4004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4005 return EDP_LINK_TRAIN_400MV_0DB_IVB;
4006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4007 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
4008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4009 return EDP_LINK_TRAIN_400MV_6DB_IVB;
4011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4012 return EDP_LINK_TRAIN_600MV_0DB_IVB;
4013 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4014 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
4016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4017 return EDP_LINK_TRAIN_800MV_0DB_IVB;
4018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4019 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
4022 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
4023 "0x%x\n", signal_levels);
4024 return EDP_LINK_TRAIN_500MV_0DB_IVB;
4029 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
4031 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4032 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4033 enum port port = intel_dig_port->base.port;
4034 u32 signal_levels, mask = 0;
4035 u8 train_set = intel_dp->train_set[0];
4037 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
4038 signal_levels = bxt_signal_levels(intel_dp);
4039 } else if (HAS_DDI(dev_priv)) {
4040 signal_levels = ddi_signal_levels(intel_dp);
4041 mask = DDI_BUF_EMP_MASK;
4042 } else if (IS_CHERRYVIEW(dev_priv)) {
4043 signal_levels = chv_signal_levels(intel_dp);
4044 } else if (IS_VALLEYVIEW(dev_priv)) {
4045 signal_levels = vlv_signal_levels(intel_dp);
4046 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
4047 signal_levels = ivb_cpu_edp_signal_levels(train_set);
4048 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4049 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
4050 signal_levels = snb_cpu_edp_signal_levels(train_set);
4051 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4053 signal_levels = g4x_signal_levels(train_set);
4054 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
4058 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
4060 DRM_DEBUG_KMS("Using vswing level %d\n",
4061 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
4062 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
4063 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
4064 DP_TRAIN_PRE_EMPHASIS_SHIFT);
4066 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
4068 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
4069 POSTING_READ(intel_dp->output_reg);
4073 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
4076 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4077 struct drm_i915_private *dev_priv =
4078 to_i915(intel_dig_port->base.base.dev);
4080 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
4082 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
4083 POSTING_READ(intel_dp->output_reg);
4086 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
4088 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4090 enum port port = intel_dig_port->base.port;
4093 if (!HAS_DDI(dev_priv))
4096 val = I915_READ(intel_dp->regs.dp_tp_ctl);
4097 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
4098 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
4099 I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
4101 /*
4102 * Until TGL, on PORT_A we can have only eDP in SST mode. There the only
4103 * reason we need to set idle transmission mode is to work around a HW
4104 * issue where we enable the pipe while not in idle link-training mode.
4105 * In this case there is a requirement to wait for a minimum number of
4106 * idle patterns to be sent.
4107 */
4108 if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
4111 if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
4112 DP_TP_STATUS_IDLE_DONE, 1))
4113 DRM_ERROR("Timed out waiting for DP idle patterns\n");
4117 intel_dp_link_down(struct intel_encoder *encoder,
4118 const struct intel_crtc_state *old_crtc_state)
4120 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4121 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4122 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4123 enum port port = encoder->port;
4124 u32 DP = intel_dp->DP;
4126 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4129 DRM_DEBUG_KMS("\n");
4131 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4132 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4133 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4134 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4136 DP &= ~DP_LINK_TRAIN_MASK;
4137 DP |= DP_LINK_TRAIN_PAT_IDLE;
4139 I915_WRITE(intel_dp->output_reg, DP);
4140 POSTING_READ(intel_dp->output_reg);
4142 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4143 I915_WRITE(intel_dp->output_reg, DP);
4144 POSTING_READ(intel_dp->output_reg);
4147 * HW workaround for IBX, we need to move the port
4148 * to transcoder A after disabling it to allow the
4149 * matching HDMI port to be enabled on transcoder A.
4151 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4153 * We get CPU/PCH FIFO underruns on the other pipe when
4154 * doing the workaround. Sweep them under the rug.
4156 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4157 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4159 /* always enable with pattern 1 (as per spec) */
4160 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4161 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4162 DP_LINK_TRAIN_PAT_1;
4163 I915_WRITE(intel_dp->output_reg, DP);
4164 POSTING_READ(intel_dp->output_reg);
4167 I915_WRITE(intel_dp->output_reg, DP);
4168 POSTING_READ(intel_dp->output_reg);
4170 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4171 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4172 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4175 msleep(intel_dp->panel_power_down_delay);
4179 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4180 intel_wakeref_t wakeref;
4182 with_pps_lock(intel_dp, wakeref)
4183 intel_dp->active_pipe = INVALID_PIPE;
4188 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4193	 * Prior to DP 1.3 the bit represented by
4194	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4195	 * If it is set, DP_DPCD_REV at 0000h could report a value less than
4196	 * the true capability of the panel. The only way to check is to
4197	 * compare the values at 0000h and 2200h.
4199 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4200 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4203 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4204 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4205 DRM_ERROR("DPCD failed read at extended capabilities\n");
4209 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4210 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4214 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4217 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4218 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4220 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
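/*
 * Read the common receiver capability block at DPCD 0x000, folding in
 * the extended capability field at 0x2200 when the sink exposes one.
 */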
4224 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4226 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4227 sizeof(intel_dp->dpcd)) < 0)
4228 return false; /* aux transfer failed */
4230 intel_dp_extended_receiver_capabilities(intel_dp);
4232 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4234 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4237 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4241 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4244 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4247 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4250 * Clear the cached register set to avoid using stale values
4251 * for the sinks that do not support DSC.
4253 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4255 /* Clear fec_capable to avoid using stale values */
4256 intel_dp->fec_capable = 0;
4258 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4259 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4260 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4261 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4263 sizeof(intel_dp->dsc_dpcd)) < 0)
4264 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4267 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4268 (int)sizeof(intel_dp->dsc_dpcd),
4269 intel_dp->dsc_dpcd);
4271 /* FEC is supported only on DP 1.4 */
4272 if (!intel_dp_is_edp(intel_dp) &&
4273 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4274 &intel_dp->fec_capable) < 0)
4275 DRM_ERROR("Failed to read FEC DPCD register\n");
4277 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4282 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4284 struct drm_i915_private *dev_priv =
4285 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4287 /* this function is meant to be called only once */
4288 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
4290 if (!intel_dp_read_dpcd(intel_dp))
4293 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4294 drm_dp_is_branch(intel_dp->dpcd));
4297 * Read the eDP display control registers.
4299 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4300 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4301 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4302 * method). The display control registers should read zero if they're
4303 * not supported anyway.
4305 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4306 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4307 sizeof(intel_dp->edp_dpcd))
4308 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
4309 intel_dp->edp_dpcd);
4312	 * This has to be called after intel_dp->edp_dpcd is filled, since PSR
4313	 * checks for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
4315 intel_psr_init_dpcd(intel_dp);
4317 /* Read the eDP 1.4+ supported link rates. */
4318 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4319 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4322 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4323 sink_rates, sizeof(sink_rates));
4325 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4326 int val = le16_to_cpu(sink_rates[i]);
4331 /* Value read multiplied by 200kHz gives the per-lane
4332 * link rate in kHz. The source rates are, however,
4333 * stored in terms of LS_Clk kHz. The full conversion
4334 * back to symbols is
4335 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
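	 * e.g. a raw value of 10800 corresponds to 2.16 Gbps per lane:
	 * 10800 * 200 / 10 = 216000 kHz LS_Clk.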
4337 intel_dp->sink_rates[i] = (val * 200) / 10;
4339 intel_dp->num_sink_rates = i;
4343 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4344 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4346 if (intel_dp->num_sink_rates)
4347 intel_dp->use_rate_select = true;
4349 intel_dp_set_sink_rates(intel_dp);
4351 intel_dp_set_common_rates(intel_dp);
4353 /* Read the eDP DSC DPCD registers */
4354 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4355 intel_dp_get_dsc_sink_cap(intel_dp);
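/*
 * Re-read the DPCD on hotplug / short pulse: refresh the sink and common
 * rates, re-check the sink count, and re-fetch the downstream port info
 * for branch devices.
 */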
4362 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4364 if (!intel_dp_read_dpcd(intel_dp))
4368 * Don't clobber cached eDP rates. Also skip re-reading
4369 * the OUI/ID since we know it won't change.
4371 if (!intel_dp_is_edp(intel_dp)) {
4372 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4373 drm_dp_is_branch(intel_dp->dpcd));
4375 intel_dp_set_sink_rates(intel_dp);
4376 intel_dp_set_common_rates(intel_dp);
4380	 * Some eDP panels do not set a valid value for sink count, which is
4381	 * why we don't read it here or in intel_edp_init_dpcd().
4383 if (!intel_dp_is_edp(intel_dp) &&
4384 !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
4388 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4393	 * Sink count can change between short pulse HPD interrupts,
4394	 * hence a member variable in intel_dp tracks any changes
4395	 * between short pulse interrupts.
4397 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4400	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4401	 * a dongle is present but no display. Unless we need to know
4402	 * whether a dongle is present, we don't need to update the
4403	 * downstream port information, so an early return here saves
4404	 * time by skipping operations that are not required.
4406 if (!intel_dp->sink_count)
4410 if (!drm_dp_is_branch(intel_dp->dpcd))
4411 return true; /* native DP sink */
4413 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4414 return true; /* no per-port downstream info */
4416 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4417 intel_dp->downstream_ports,
4418 DP_MAX_DOWNSTREAM_PORTS) < 0)
4419 return false; /* downstream port status fetch failed */
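/* Check whether the sink advertises MST: needs DPCD 1.2+ and the MST_CAP bit in MSTM_CAP. */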
4425 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4429 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4432 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4435 return mstm_cap & DP_MST_CAP;
4439 intel_dp_can_mst(struct intel_dp *intel_dp)
4441 return i915_modparams.enable_dp_mst &&
4442 intel_dp->can_mst &&
4443 intel_dp_sink_can_mst(intel_dp);
4447 intel_dp_configure_mst(struct intel_dp *intel_dp)
4449 struct intel_encoder *encoder =
4450 &dp_to_dig_port(intel_dp)->base;
4451 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4453 DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support? port: %s, sink: %s, modparam: %s\n",
4454 encoder->base.base.id, encoder->base.name,
4455 yesno(intel_dp->can_mst), yesno(sink_can_mst),
4456 yesno(i915_modparams.enable_dp_mst));
4458 if (!intel_dp->can_mst)
4461 intel_dp->is_mst = sink_can_mst &&
4462 i915_modparams.enable_dp_mst;
4464 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
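/* Read the whole event status indicator (ESI) block in a single AUX transfer. */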
4469 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4471 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4472 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4477 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4478 const struct intel_crtc_state *crtc_state)
4480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4481 struct dp_sdp vsc_sdp = {};
4483 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4484 vsc_sdp.sdp_header.HB0 = 0;
4485 vsc_sdp.sdp_header.HB1 = 0x7;
4488 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4489 * Colorimetry Format indication.
4491 vsc_sdp.sdp_header.HB2 = 0x5;
4494	 * Number of valid data bytes for a VSC SDP supporting 3D stereo
4495	 * + PSR2 + Pixel Encoding/Colorimetry Format indication (HB2 = 05h).
4497 vsc_sdp.sdp_header.HB3 = 0x13;
4500	 * DB16[7:4]: Pixel Encoding, YCbCr 420 = 3h. DB16[3:0]: Colorimetry,
4501	 * ITU-R BT.601 = 0h, ITU-R BT.709 = 1h (DP 1.4a spec, Table 2-120).
4503	vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 */
4504 /* RGB->YCBCR color conversion uses the BT.709 color space. */
4505 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4508 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4509 * the following Component Bit Depth values are defined:
	switch (crtc_state->pipe_bpp) {
	case 24: /* 8bpc */
		vsc_sdp.db[17] = 0x1;
		break;
	case 30: /* 10bpc */
		vsc_sdp.db[17] = 0x2;
		break;
	case 36: /* 12bpc */
		vsc_sdp.db[17] = 0x3;
		break;
	case 48: /* 16bpc */
		vsc_sdp.db[17] = 0x4;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}
4534 * Dynamic Range (Bit 7)
4535 * 0 = VESA range, 1 = CTA range.
4536	 * All YCbCr formats are always limited range.
4538 vsc_sdp.db[17] |= 0x80;
4541 * Content Type (Bits 2:0)
4542 * 000b = Not defined.
4547 * All other values are RESERVED.
4548 * Note: See CTA-861-G for the definition and expected
4549	 * processing by a stream sink for the above content types.
4553 intel_dig_port->write_infoframe(&intel_dig_port->base,
4554 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4557 void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4558 const struct intel_crtc_state *crtc_state)
4560 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4563 intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
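/* DP CTS 3.1.4: latch the requested test link rate / lane count if they are valid for this platform. */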
4566 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4570 u8 test_lane_count, test_link_bw;
4574	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4575 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4579 DRM_DEBUG_KMS("Lane count read failed\n");
4582 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4584 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4587 DRM_DEBUG_KMS("Link Rate read failed\n");
4590 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4592 /* Validate the requested link rate and lane count */
4593 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4597 intel_dp->compliance.test_lane_count = test_lane_count;
4598 intel_dp->compliance.test_link_rate = test_link_rate;
4603 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4607 __be16 h_width, v_height;
4610 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4611 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4614 DRM_DEBUG_KMS("Test pattern read failed\n");
4617 if (test_pattern != DP_COLOR_RAMP)
4620 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4623 DRM_DEBUG_KMS("H Width read failed\n");
4627 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4630 DRM_DEBUG_KMS("V Height read failed\n");
4634 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4637 DRM_DEBUG_KMS("TEST MISC read failed\n");
4640 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4642 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4644 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4645 case DP_TEST_BIT_DEPTH_6:
4646 intel_dp->compliance.test_data.bpc = 6;
4648 case DP_TEST_BIT_DEPTH_8:
4649 intel_dp->compliance.test_data.bpc = 8;
4655 intel_dp->compliance.test_data.video_pattern = test_pattern;
4656 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4657 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4658 /* Set test active flag here so userspace doesn't interrupt things */
4659 intel_dp->compliance.test_active = 1;
4664 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4666 u8 test_result = DP_TEST_ACK;
4667 struct intel_connector *intel_connector = intel_dp->attached_connector;
4668 struct drm_connector *connector = &intel_connector->base;
4670 if (intel_connector->detect_edid == NULL ||
4671 connector->edid_corrupt ||
4672 intel_dp->aux.i2c_defer_count > 6) {
4673 /* Check EDID read for NACKs, DEFERs and corruption
4674 * (DP CTS 1.2 Core r1.1)
4675 * 4.2.2.4 : Failed EDID read, I2C_NAK
4676 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4677 * 4.2.2.6 : EDID corruption detected
4678 * Use failsafe mode for all cases
4680 if (intel_dp->aux.i2c_nack_count > 0 ||
4681 intel_dp->aux.i2c_defer_count > 0)
4682 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4683 intel_dp->aux.i2c_nack_count,
4684 intel_dp->aux.i2c_defer_count);
4685 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4687 struct edid *block = intel_connector->detect_edid;
4689 /* We have to write the checksum
4690 * of the last block read
4692 block += intel_connector->detect_edid->extensions;
4694 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4695 block->checksum) <= 0)
4696 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4698 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4699 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4702 /* Set test active flag here so userspace doesn't interrupt things */
4703 intel_dp->compliance.test_active = 1;
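/* PHY test patterns are not implemented, so the request is always NAKed. */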
4708 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4710 u8 test_result = DP_TEST_NAK;
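/* Read DP_TEST_REQUEST, dispatch to the matching autotest handler and write the ACK/NAK back to the sink. */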
4714 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4716 u8 response = DP_TEST_NAK;
4720 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4722 DRM_DEBUG_KMS("Could not read test request from sink\n");
4727 case DP_TEST_LINK_TRAINING:
4728 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4729 response = intel_dp_autotest_link_training(intel_dp);
4731 case DP_TEST_LINK_VIDEO_PATTERN:
4732 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4733 response = intel_dp_autotest_video_pattern(intel_dp);
4735 case DP_TEST_LINK_EDID_READ:
4736 DRM_DEBUG_KMS("EDID test requested\n");
4737 response = intel_dp_autotest_edid(intel_dp);
4739 case DP_TEST_LINK_PHY_TEST_PATTERN:
4740 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4741 response = intel_dp_autotest_phy_pattern(intel_dp);
4744 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4748 if (response & DP_TEST_ACK)
4749 intel_dp->compliance.test_type = request;
4752 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4754 DRM_DEBUG_KMS("Could not write test response to sink\n");
4758 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4762 if (intel_dp->is_mst) {
4763 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
4768 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
4769 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4773	/* check link status - &esi[10] holds the link status regs (DPCD 0x200c) */
4774 if (intel_dp->active_mst_links > 0 &&
4775 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4776 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4777 intel_dp_start_link_train(intel_dp);
4778 intel_dp_stop_link_train(intel_dp);
4781 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4782 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4785 for (retry = 0; retry < 3; retry++) {
4787 wret = drm_dp_dpcd_write(&intel_dp->aux,
4788 DP_SINK_COUNT_ESI+1,
4795 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4797 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4805 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4806 intel_dp->is_mst = false;
4807 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
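/* Check the cached link params and current link status to decide whether a retrain is required. */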
4815 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4817 u8 link_status[DP_LINK_STATUS_SIZE];
4819 if (!intel_dp->link_trained)
4823	 * While PSR source HW is enabled it controls the main link, enabling
4824	 * and disabling frame transmission as needed. Attempting a retrain
4825	 * can therefore fail: the link may not be on, or training patterns
4826	 * could get mixed with frame data.
4827	 * Also, when exiting PSR the HW retrains the link anyway, fixing
4828	 * any link status errors.
4830 if (intel_psr_enabled(intel_dp))
4833 if (!intel_dp_get_link_status(intel_dp, link_status))
4837 * Validate the cached values of intel_dp->link_rate and
4838 * intel_dp->lane_count before attempting to retrain.
4840 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4841 intel_dp->lane_count))
4844 /* Retrain if Channel EQ or CR not ok */
4845 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4848 int intel_dp_retrain_link(struct intel_encoder *encoder,
4849 struct drm_modeset_acquire_ctx *ctx)
4851 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4852 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4853 struct intel_connector *connector = intel_dp->attached_connector;
4854 struct drm_connector_state *conn_state;
4855 struct intel_crtc_state *crtc_state;
4856 struct intel_crtc *crtc;
4859 /* FIXME handle the MST connectors as well */
4861 if (!connector || connector->base.status != connector_status_connected)
4864 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4869 conn_state = connector->base.state;
4871 crtc = to_intel_crtc(conn_state->crtc);
4875 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4879 crtc_state = to_intel_crtc_state(crtc->base.state);
4881 WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
4883 if (!crtc_state->base.active)
4886 if (conn_state->commit &&
4887 !try_wait_for_completion(&conn_state->commit->hw_done))
4890 if (!intel_dp_needs_link_retrain(intel_dp))
4893 /* Suppress underruns caused by re-training */
4894 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4895 if (crtc_state->has_pch_encoder)
4896 intel_set_pch_fifo_underrun_reporting(dev_priv,
4897 intel_crtc_pch_transcoder(crtc), false);
4899 intel_dp_start_link_train(intel_dp);
4900 intel_dp_stop_link_train(intel_dp);
4902 /* Keep underrun reporting disabled until things are stable */
4903 intel_wait_for_vblank(dev_priv, crtc->pipe);
4905 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4906 if (crtc_state->has_pch_encoder)
4907 intel_set_pch_fifo_underrun_reporting(dev_priv,
4908 intel_crtc_pch_transcoder(crtc), true);
4914	 * If the display is now connected, check link status; there have
4915	 * been known issues of link loss triggering a long pulse.
4918	 * Some sinks (e.g. ASUS PB287Q) seem to perform some
4919 * weird HPD ping pong during modesets. So we can apparently
4920 * end up with HPD going low during a modeset, and then
4921 * going back up soon after. And once that happens we must
4922 * retrain the link to get a picture. That's in case no
4923 * userspace component reacted to intermittent HPD dip.
4925 static enum intel_hotplug_state
4926 intel_dp_hotplug(struct intel_encoder *encoder,
4927 struct intel_connector *connector,
4930 struct drm_modeset_acquire_ctx ctx;
4931 enum intel_hotplug_state state;
4934 state = intel_encoder_hotplug(encoder, connector, irq_received);
4936 drm_modeset_acquire_init(&ctx, 0);
4939 ret = intel_dp_retrain_link(encoder, &ctx);
4941 if (ret == -EDEADLK) {
4942 drm_modeset_backoff(&ctx);
4949 drm_modeset_drop_locks(&ctx);
4950 drm_modeset_acquire_fini(&ctx);
4951 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4954 * Keeping it consistent with intel_ddi_hotplug() and
4955 * intel_hdmi_hotplug().
4957 if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
4958 state = INTEL_HOTPLUG_RETRY;
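/* Ack the device service IRQ vector and dispatch automated test, content protection and sink specific IRQs. */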
4963 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
4967 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4970 if (drm_dp_dpcd_readb(&intel_dp->aux,
4971 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4974 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4976 if (val & DP_AUTOMATED_TEST_REQUEST)
4977 intel_dp_handle_test_request(intel_dp);
4979 if (val & DP_CP_IRQ)
4980 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4982 if (val & DP_SINK_SPECIFIC_IRQ)
4983 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
4987	 * According to DP spec:
	 * 1. Read DPCD
4990	 * 2. Configure link according to Receiver Capabilities
4991 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4992 * 4. Check link status on receipt of hot-plug interrupt
4994 * intel_dp_short_pulse - handles short pulse interrupts
4995 * when full detection is not required.
4996 * Returns %true if short pulse is handled and full detection
4997 * is NOT required and %false otherwise.
5000 intel_dp_short_pulse(struct intel_dp *intel_dp)
5002 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5003 u8 old_sink_count = intel_dp->sink_count;
5007 * Clearing compliance test variables to allow capturing
5008 * of values for next automated test request.
5010 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5013	 * Now read the DPCD to see if it's actually running.
5014	 * If the current value of sink count doesn't match with
5015	 * the value that was stored earlier, or the DPCD read failed,
5016	 * we need to do a full detection.
5018 ret = intel_dp_get_dpcd(intel_dp);
5020 if ((old_sink_count != intel_dp->sink_count) || !ret) {
5021 /* No need to proceed if we are going to do full detect */
5025 intel_dp_check_service_irq(intel_dp);
5027 /* Handle CEC interrupts, if any */
5028 drm_dp_cec_irq(&intel_dp->aux);
5030 /* defer to the hotplug work for link retraining if needed */
5031 if (intel_dp_needs_link_retrain(intel_dp))
5034 intel_psr_short_pulse(intel_dp);
5036 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
5037 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
5038 /* Send a Hotplug Uevent to userspace to start modeset */
5039 drm_kms_helper_hotplug_event(&dev_priv->drm);
5045 /* XXX this is probably wrong for multiple downstream ports */
5046 static enum drm_connector_status
5047 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5049 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5050 u8 *dpcd = intel_dp->dpcd;
5053 if (WARN_ON(intel_dp_is_edp(intel_dp)))
5054 return connector_status_connected;
5057 lspcon_resume(lspcon);
5059 if (!intel_dp_get_dpcd(intel_dp))
5060 return connector_status_disconnected;
5062 /* if there's no downstream port, we're done */
5063 if (!drm_dp_is_branch(dpcd))
5064 return connector_status_connected;
5066 /* If we're HPD-aware, SINK_COUNT changes dynamically */
5067 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5068 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5070 return intel_dp->sink_count ?
5071 connector_status_connected : connector_status_disconnected;
5074 if (intel_dp_can_mst(intel_dp))
5075 return connector_status_connected;
5077 /* If no HPD, poke DDC gently */
5078 if (drm_probe_ddc(&intel_dp->aux.ddc))
5079 return connector_status_connected;
5081 /* Well we tried, say unknown for unreliable port types */
5082 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5083 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5084 if (type == DP_DS_PORT_TYPE_VGA ||
5085 type == DP_DS_PORT_TYPE_NON_EDID)
5086 return connector_status_unknown;
5088 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5089 DP_DWN_STRM_PORT_TYPE_MASK;
5090 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5091 type == DP_DWN_STRM_PORT_TYPE_OTHER)
5092 return connector_status_unknown;
5095 /* Anything else is out of spec, warn and ignore */
5096 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5097 return connector_status_disconnected;
5100 static enum drm_connector_status
5101 edp_detect(struct intel_dp *intel_dp)
5103 return connector_status_connected;
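/* The *_digital_port_connected() helpers below sample the platform's live hotplug status bit for the encoder's HPD pin. */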
5106 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5108 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5111 switch (encoder->hpd_pin) {
5113 bit = SDE_PORTB_HOTPLUG;
5116 bit = SDE_PORTC_HOTPLUG;
5119 bit = SDE_PORTD_HOTPLUG;
5122 MISSING_CASE(encoder->hpd_pin);
5126 return I915_READ(SDEISR) & bit;
5129 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5131 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5134 switch (encoder->hpd_pin) {
5136 bit = SDE_PORTB_HOTPLUG_CPT;
5139 bit = SDE_PORTC_HOTPLUG_CPT;
5142 bit = SDE_PORTD_HOTPLUG_CPT;
5145 MISSING_CASE(encoder->hpd_pin);
5149 return I915_READ(SDEISR) & bit;
5152 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5154 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5157 switch (encoder->hpd_pin) {
5159 bit = SDE_PORTA_HOTPLUG_SPT;
5162 bit = SDE_PORTE_HOTPLUG_SPT;
5165 return cpt_digital_port_connected(encoder);
5168 return I915_READ(SDEISR) & bit;
5171 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5173 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5176 switch (encoder->hpd_pin) {
5178 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5181 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5184 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5187 MISSING_CASE(encoder->hpd_pin);
5191 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5194 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5196 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5199 switch (encoder->hpd_pin) {
5201 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5204 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5207 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5210 MISSING_CASE(encoder->hpd_pin);
5214 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5217 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5219 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5221 if (encoder->hpd_pin == HPD_PORT_A)
5222 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5224 return ibx_digital_port_connected(encoder);
5227 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5229 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5231 if (encoder->hpd_pin == HPD_PORT_A)
5232 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5234 return cpt_digital_port_connected(encoder);
5237 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5239 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5241 if (encoder->hpd_pin == HPD_PORT_A)
5242 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5244 return cpt_digital_port_connected(encoder);
5247 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5249 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5251 if (encoder->hpd_pin == HPD_PORT_A)
5252 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5254 return cpt_digital_port_connected(encoder);
5257 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5259 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5262 switch (encoder->hpd_pin) {
5264 bit = BXT_DE_PORT_HP_DDIA;
5267 bit = BXT_DE_PORT_HP_DDIB;
5270 bit = BXT_DE_PORT_HP_DDIC;
5273 MISSING_CASE(encoder->hpd_pin);
5277 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5280 static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5281 struct intel_digital_port *intel_dig_port)
5283 enum port port = intel_dig_port->base.port;
5285 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5288 static bool icl_digital_port_connected(struct intel_encoder *encoder)
5290 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5291 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5292 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5294 if (intel_phy_is_combo(dev_priv, phy))
5295 return icl_combo_port_connected(dev_priv, dig_port);
5296 else if (intel_phy_is_tc(dev_priv, phy))
5297 return intel_tc_port_connected(dig_port);
5299 MISSING_CASE(encoder->hpd_pin);
5305 * intel_digital_port_connected - is the specified port connected?
5306 * @encoder: intel_encoder
5308 * In cases where there's a connector physically connected but it can't be used
5309 * by our hardware we also return false, since the rest of the driver should
5310 * pretty much treat the port as disconnected. This is relevant for type-C
5311 * (starting on ICL) where there's ownership involved.
5313 * Return %true if port is connected, %false otherwise.
5315 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5317 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5319 if (HAS_GMCH(dev_priv)) {
5320 if (IS_GM45(dev_priv))
5321 return gm45_digital_port_connected(encoder);
5323 return g4x_digital_port_connected(encoder);
5326 if (INTEL_GEN(dev_priv) >= 11)
5327 return icl_digital_port_connected(encoder);
5328 else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
5329 return spt_digital_port_connected(encoder);
5330 else if (IS_GEN9_LP(dev_priv))
5331 return bxt_digital_port_connected(encoder);
5332 else if (IS_GEN(dev_priv, 8))
5333 return bdw_digital_port_connected(encoder);
5334 else if (IS_GEN(dev_priv, 7))
5335 return ivb_digital_port_connected(encoder);
5336 else if (IS_GEN(dev_priv, 6))
5337 return snb_digital_port_connected(encoder);
5338 else if (IS_GEN(dev_priv, 5))
5339 return ilk_digital_port_connected(encoder);
5341 MISSING_CASE(INTEL_GEN(dev_priv));
5345 bool intel_digital_port_connected(struct intel_encoder *encoder)
5347 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5348 bool is_connected = false;
5349 intel_wakeref_t wakeref;
5351 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5352 is_connected = __intel_digital_port_connected(encoder);
5354 return is_connected;
5357 static struct edid *
5358 intel_dp_get_edid(struct intel_dp *intel_dp)
5360 struct intel_connector *intel_connector = intel_dp->attached_connector;
5362 /* use cached edid if we have one */
5363 if (intel_connector->edid) {
5365 if (IS_ERR(intel_connector->edid))
5368 return drm_edid_duplicate(intel_connector->edid);
5370 return drm_get_edid(&intel_connector->base,
5371 &intel_dp->aux.ddc);
5375 intel_dp_set_edid(struct intel_dp *intel_dp)
5377 struct intel_connector *intel_connector = intel_dp->attached_connector;
5380 intel_dp_unset_edid(intel_dp);
5381 edid = intel_dp_get_edid(intel_dp);
5382 intel_connector->detect_edid = edid;
5384 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5385 drm_dp_cec_set_edid(&intel_dp->aux, edid);
5389 intel_dp_unset_edid(struct intel_dp *intel_dp)
5391 struct intel_connector *intel_connector = intel_dp->attached_connector;
5393 drm_dp_cec_unset_edid(&intel_dp->aux);
5394 kfree(intel_connector->detect_edid);
5395 intel_connector->detect_edid = NULL;
5397 intel_dp->has_audio = false;
5401 intel_dp_detect(struct drm_connector *connector,
5402 struct drm_modeset_acquire_ctx *ctx,
5405 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5406 struct intel_dp *intel_dp = intel_attached_dp(connector);
5407 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5408 struct intel_encoder *encoder = &dig_port->base;
5409 enum drm_connector_status status;
5411 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5412 connector->base.id, connector->name);
5413 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5415 /* Can't disconnect eDP */
5416 if (intel_dp_is_edp(intel_dp))
5417 status = edp_detect(intel_dp);
5418 else if (intel_digital_port_connected(encoder))
5419 status = intel_dp_detect_dpcd(intel_dp);
5421 status = connector_status_disconnected;
5423 if (status == connector_status_disconnected) {
5424 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5425 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5427 if (intel_dp->is_mst) {
5428 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5430 intel_dp->mst_mgr.mst_state);
5431 intel_dp->is_mst = false;
5432 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5439 if (intel_dp->reset_link_params) {
5440 /* Initial max link lane count */
5441 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5443 /* Initial max link rate */
5444 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5446 intel_dp->reset_link_params = false;
5449 intel_dp_print_rates(intel_dp);
5451 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5452 if (INTEL_GEN(dev_priv) >= 11)
5453 intel_dp_get_dsc_sink_cap(intel_dp);
5455 intel_dp_configure_mst(intel_dp);
5457 if (intel_dp->is_mst) {
5459 * If we are in MST mode then this connector
5460	 * won't appear connected or have anything with EDID on it.
5463 status = connector_status_disconnected;
5468 * Some external monitors do not signal loss of link synchronization
5469 * with an IRQ_HPD, so force a link status check.
5471 if (!intel_dp_is_edp(intel_dp)) {
5474 ret = intel_dp_retrain_link(encoder, ctx);
5480	 * Clear the NACK and defer counts so we get their exact values
5481	 * while reading the EDID, as required by Compliance tests
5482	 * 4.2.2.4 and 4.2.2.5.
5484 intel_dp->aux.i2c_nack_count = 0;
5485 intel_dp->aux.i2c_defer_count = 0;
5487 intel_dp_set_edid(intel_dp);
5488 if (intel_dp_is_edp(intel_dp) ||
5489 to_intel_connector(connector)->detect_edid)
5490 status = connector_status_connected;
5492 intel_dp_check_service_irq(intel_dp);
5495 if (status != connector_status_connected && !intel_dp->is_mst)
5496 intel_dp_unset_edid(intel_dp);
5502 intel_dp_force(struct drm_connector *connector)
5504 struct intel_dp *intel_dp = intel_attached_dp(connector);
5505 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5506 struct intel_encoder *intel_encoder = &dig_port->base;
5507 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5508 enum intel_display_power_domain aux_domain =
5509 intel_aux_power_domain(dig_port);
5510 intel_wakeref_t wakeref;
5512 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5513 connector->base.id, connector->name);
5514 intel_dp_unset_edid(intel_dp);
5516 if (connector->status != connector_status_connected)
5519 wakeref = intel_display_power_get(dev_priv, aux_domain);
5521 intel_dp_set_edid(intel_dp);
5523 intel_display_power_put(dev_priv, aux_domain, wakeref);
5526 static int intel_dp_get_modes(struct drm_connector *connector)
5528 struct intel_connector *intel_connector = to_intel_connector(connector);
5531 edid = intel_connector->detect_edid;
5533 int ret = intel_connector_update_modes(connector, edid);
5538 /* if eDP has no EDID, fall back to fixed mode */
5539 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5540 intel_connector->panel.fixed_mode) {
5541 struct drm_display_mode *mode;
5543 mode = drm_mode_duplicate(connector->dev,
5544 intel_connector->panel.fixed_mode);
5546 drm_mode_probed_add(connector, mode);
5555 intel_dp_connector_register(struct drm_connector *connector)
5557 struct intel_dp *intel_dp = intel_attached_dp(connector);
5558 struct drm_device *dev = connector->dev;
5561 ret = intel_connector_register(connector);
5565 i915_debugfs_connector_add(connector);
5567 DRM_DEBUG_KMS("registering %s bus for %s\n",
5568 intel_dp->aux.name, connector->kdev->kobj.name);
5570 intel_dp->aux.dev = connector->kdev;
5571 ret = drm_dp_aux_register(&intel_dp->aux);
5573 drm_dp_cec_register_connector(&intel_dp->aux,
5574 connector->name, dev->dev);
5579 intel_dp_connector_unregister(struct drm_connector *connector)
5581 struct intel_dp *intel_dp = intel_attached_dp(connector);
5583 drm_dp_cec_unregister_connector(&intel_dp->aux);
5584 drm_dp_aux_unregister(&intel_dp->aux);
5585 intel_connector_unregister(connector);
5588 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5590 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5591 struct intel_dp *intel_dp = &intel_dig_port->dp;
5593 intel_dp_mst_encoder_cleanup(intel_dig_port);
5594 if (intel_dp_is_edp(intel_dp)) {
5595 intel_wakeref_t wakeref;
5597 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5599	 * vdd might still be enabled due to the delayed vdd off.
5600 * Make sure vdd is actually turned off here.
5602 with_pps_lock(intel_dp, wakeref)
5603 edp_panel_vdd_off_sync(intel_dp);
5605 if (intel_dp->edp_notifier.notifier_call) {
5606 unregister_reboot_notifier(&intel_dp->edp_notifier);
5607 intel_dp->edp_notifier.notifier_call = NULL;
5611 intel_dp_aux_fini(intel_dp);
5614 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5616 intel_dp_encoder_flush_work(encoder);
5618 drm_encoder_cleanup(encoder);
5619 kfree(enc_to_dig_port(encoder));
5622 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5624 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5625 intel_wakeref_t wakeref;
5627 if (!intel_dp_is_edp(intel_dp))
5631	 * vdd might still be enabled due to the delayed vdd off.
5632 * Make sure vdd is actually turned off here.
5634 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5635 with_pps_lock(intel_dp, wakeref)
5636 edp_panel_vdd_off_sync(intel_dp);
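/* Sleep (up to timeout ms) until a CP_IRQ moves cp_irq_count past the cached value. */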
5639 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5643 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5644 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5645 msecs_to_jiffies(timeout));
5648	DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5652 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5655 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5656 static const struct drm_dp_aux_msg msg = {
5657 .request = DP_AUX_NATIVE_WRITE,
5658 .address = DP_AUX_HDCP_AKSV,
5659 .size = DRM_HDCP_KSV_LEN,
5661 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5665 /* Output An first, that's easy */
5666 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5667 an, DRM_HDCP_AN_LEN);
5668 if (dpcd_ret != DRM_HDCP_AN_LEN) {
5669 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5671 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5675 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5676 * order to get it on the wire, we need to create the AUX header as if
5677 * we were writing the data, and then tickle the hardware to output the
5678 * data once the header is sent out.
5680 intel_dp_aux_header(txbuf, &msg);
5682 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5683 rxbuf, sizeof(rxbuf),
5684 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5686 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5688 } else if (ret == 0) {
5689 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5693 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5694 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5695 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5702 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5706 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5708 if (ret != DRM_HDCP_KSV_LEN) {
5709 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
5710 return ret >= 0 ? -EIO : ret;
5715 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5720 * For some reason the HDMI and DP HDCP specs call this register
5721 * definition by different names. In the HDMI spec, it's called BSTATUS,
5722 * but in DP it's called BINFO.
5724 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5725 bstatus, DRM_HDCP_BSTATUS_LEN);
5726 if (ret != DRM_HDCP_BSTATUS_LEN) {
5727 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5728 return ret >= 0 ? -EIO : ret;
5734 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5739 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5742 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
5743 return ret >= 0 ? -EIO : ret;
5750 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5751 bool *repeater_present)
5756 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5760 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5765 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5769 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5770 ri_prime, DRM_HDCP_RI_LEN);
5771 if (ret != DRM_HDCP_RI_LEN) {
5772 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
5773 return ret >= 0 ? -EIO : ret;
5779 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5784 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5787 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5788 return ret >= 0 ? -EIO : ret;
5790 *ksv_ready = bstatus & DP_BSTATUS_READY;
5795 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5796 int num_downstream, u8 *ksv_fifo)
5801 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5802 for (i = 0; i < num_downstream; i += 3) {
5803 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5804 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5805 DP_AUX_HDCP_KSV_FIFO,
5806 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5809 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5811 return ret >= 0 ? -EIO : ret;
5818 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5823 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5826 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5827 DP_AUX_HDCP_V_PRIME(i), part,
5828 DRM_HDCP_V_PRIME_PART_LEN);
5829 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5830 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5831 return ret >= 0 ? -EIO : ret;
5837 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5840 /* Not used for single stream DisplayPort setups */
5845 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5850 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5853 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5857 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5861 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5867 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5871 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5875 struct hdcp2_dp_errata_stream_type {
5880 struct hdcp2_dp_msg_data {
5883 bool msg_detectable;
5885	u32 timeout2; /* Added for the non-paired situation */
5888 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
5889 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
5890 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5891 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
5892 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5894 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5896 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5897 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5898 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
5899 { HDCP_2_2_AKE_SEND_PAIRING_INFO,
5900 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5901 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
5902 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
5903 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5904 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
5905 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5907 { HDCP_2_2_REP_SEND_RECVID_LIST,
5908 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5909 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
5910 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5912 { HDCP_2_2_REP_STREAM_MANAGE,
5913 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5915 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5916 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
5917 /* local define to shovel this through the write_2_2 interface */
5918 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5919 { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5920 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5925 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5930 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5931 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5932 HDCP_2_2_DP_RXSTATUS_LEN);
5933 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5934 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5935 return ret >= 0 ? -EIO : ret;
5942 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5943 u8 msg_id, bool *msg_ready)
5949 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5954 case HDCP_2_2_AKE_SEND_HPRIME:
5955 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5958 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5959 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5962 case HDCP_2_2_REP_SEND_RECVID_LIST:
5963 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5967 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5975 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5976 const struct hdcp2_dp_msg_data *hdcp2_msg_data)
5978 struct intel_dp *dp = &intel_dig_port->dp;
5979 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5980 u8 msg_id = hdcp2_msg_data->msg_id;
5982 bool msg_ready = false;
5984 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5985 timeout = hdcp2_msg_data->timeout2;
5987 timeout = hdcp2_msg_data->timeout;
5990	 * There is no way to detect the availability of CERT, LPRIME and
5991	 * STREAM_READY, so wait for the timeout and then read the msg.
5993 if (!hdcp2_msg_data->msg_detectable) {
5998	 * Since we only want to check msg availability at the timeout,
5999	 * ignore the timeout reported by the CP_IRQ wait.
6001 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6002 ret = hdcp2_detect_msg_availability(intel_dig_port,
6003 msg_id, &msg_ready);
6009 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6010 hdcp2_msg_data->msg_id, ret, timeout);
6015 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6019 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
6020 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
6021 return &hdcp2_dp_msg_data[i];
6027 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6028 void *buf, size_t size)
6030 struct intel_dp *dp = &intel_dig_port->dp;
6031 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6032 unsigned int offset;
6034 ssize_t ret, bytes_to_write, len;
6035 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6037 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6038 if (!hdcp2_msg_data)
6041 offset = hdcp2_msg_data->offset;
6043 /* No msg_id in DP HDCP2.2 msgs */
6044 bytes_to_write = size - 1;
6047 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6049 while (bytes_to_write) {
6050 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6051 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6053 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6054 offset, (void *)byte, len);
6058 bytes_to_write -= ret;
6067 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6069 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6073 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6074 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6075 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6076 if (ret != HDCP_2_2_RXINFO_LEN)
6077 return ret >= 0 ? -EIO : ret;
6079 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6080 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6082 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6083 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
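	/* Fixed part of the message minus the max sized ID list, plus the actual IDs. */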
6085 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6086 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6087 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6093 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6094 u8 msg_id, void *buf, size_t size)
6096 unsigned int offset;
6098 ssize_t ret, bytes_to_recv, len;
6099 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6101 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6102 if (!hdcp2_msg_data)
6104 offset = hdcp2_msg_data->offset;
6106 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6110 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6111 ret = get_receiver_id_list_size(intel_dig_port);
6117 bytes_to_recv = size - 1;
6119	/* DP adaptation msgs have no msg_id */
6122 while (bytes_to_recv) {
6123 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6124 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6126 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6129 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6133 bytes_to_recv -= ret;
6144 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6145 bool is_repeater, u8 content_type)
6147 struct hdcp2_dp_errata_stream_type stream_type_msg;
6153	 * Errata for DP: as the stream type is used for encryption, the
6154	 * receiver must be told the stream type so it can decrypt the content.
6156	 * A repeater is told the stream type as part of its
6157	 * authentication later on.
6159 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6160 stream_type_msg.stream_type = content_type;
6162 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6163 sizeof(stream_type_msg));
6167 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6172 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6176 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6177 ret = HDCP_REAUTH_REQUEST;
6178 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6179 ret = HDCP_LINK_INTEGRITY_FAILURE;
6180 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6181 ret = HDCP_TOPOLOGY_CHANGE;
6187 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6194 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6195 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6196 rx_caps, HDCP_2_2_RXCAPS_LEN);
6197 if (ret != HDCP_2_2_RXCAPS_LEN)
6198 return ret >= 0 ? -EIO : ret;
6200 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6201 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6207 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6208 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6209 .read_bksv = intel_dp_hdcp_read_bksv,
6210 .read_bstatus = intel_dp_hdcp_read_bstatus,
6211 .repeater_present = intel_dp_hdcp_repeater_present,
6212 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6213 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6214 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6215 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6216 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6217 .check_link = intel_dp_hdcp_check_link,
6218 .hdcp_capable = intel_dp_hdcp_capable,
6219 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6220 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6221 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6222 .check_2_2_link = intel_dp_hdcp2_check_link,
6223 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6224 .protocol = HDCP_PROTOCOL_DP,
6227 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6229 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6230 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6232 lockdep_assert_held(&dev_priv->pps_mutex);
6234 if (!edp_have_panel_vdd(intel_dp))
6238 * The VDD bit needs a power domain reference, so if the bit is
6239 * already enabled when we boot or resume, grab this reference and
6240	 * schedule a vdd off, so we don't hold on to the reference indefinitely.
6243 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
6244 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6246 edp_panel_vdd_schedule_off(intel_dp);
6249 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6251 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6252 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6255 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6256 encoder->port, &pipe))
6259 return INVALID_PIPE;
6262 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6264 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6265 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6266 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6267 intel_wakeref_t wakeref;
6269 if (!HAS_DDI(dev_priv))
6270 intel_dp->DP = I915_READ(intel_dp->output_reg);
6273 lspcon_resume(lspcon);
6275 intel_dp->reset_link_params = true;
6277 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6278 !intel_dp_is_edp(intel_dp))
6281 with_pps_lock(intel_dp, wakeref) {
6282 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6283 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6285 if (intel_dp_is_edp(intel_dp)) {
6287 * Reinit the power sequencer, in case BIOS did
6288 * something nasty with it.
6290 intel_dp_pps_init(intel_dp);
6291 intel_edp_panel_vdd_sanitize(intel_dp);
6296 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6297 .force = intel_dp_force,
6298 .fill_modes = drm_helper_probe_single_connector_modes,
6299 .atomic_get_property = intel_digital_connector_atomic_get_property,
6300 .atomic_set_property = intel_digital_connector_atomic_set_property,
6301 .late_register = intel_dp_connector_register,
6302 .early_unregister = intel_dp_connector_unregister,
6303 .destroy = intel_connector_destroy,
6304 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6305 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6308 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6309 .detect_ctx = intel_dp_detect,
6310 .get_modes = intel_dp_get_modes,
6311 .mode_valid = intel_dp_mode_valid,
6312 .atomic_check = intel_digital_connector_atomic_check,
6315 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6316 .reset = intel_dp_encoder_reset,
6317 .destroy = intel_dp_encoder_destroy,
6321 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6323 struct intel_dp *intel_dp = &intel_dig_port->dp;
6325 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6327 * vdd off can generate a long pulse on eDP which
6328 * would require vdd on to handle it, and thus we
6329 * would end up in an endless cycle of
6330 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6332 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
6333 intel_dig_port->base.base.base.id,
6334 intel_dig_port->base.base.name);
6338 DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
6339 intel_dig_port->base.base.base.id,
6340 intel_dig_port->base.base.name,
6341 long_hpd ? "long" : "short");
6344 intel_dp->reset_link_params = true;
6348 if (intel_dp->is_mst) {
6349 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6351 * If we were in MST mode, and device is not
6352 * there, get out of MST mode
6354 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6355 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6356 intel_dp->is_mst = false;
6357 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6364 if (!intel_dp->is_mst) {
6367 handled = intel_dp_short_pulse(intel_dp);
6376 /* check the VBT to see whether the eDP is on another port */
6377 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6380	 * eDP is not supported on g4x, so bail out early just
6381	 * for a bit of extra safety in case the VBT is bonkers.
6383 if (INTEL_GEN(dev_priv) < 5)
6386 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6389 return intel_bios_is_port_edp(dev_priv, port);
6393 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6395 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6396 enum port port = dp_to_dig_port(intel_dp)->base.port;
6398 if (!IS_G4X(dev_priv) && port != PORT_A)
6399 intel_attach_force_audio_property(connector);
6401 intel_attach_broadcast_rgb_property(connector);
6402 if (HAS_GMCH(dev_priv))
6403 drm_connector_attach_max_bpc_property(connector, 6, 10);
6404 else if (INTEL_GEN(dev_priv) >= 5)
6405 drm_connector_attach_max_bpc_property(connector, 6, 12);
6407 if (intel_dp_is_edp(intel_dp)) {
6408 u32 allowed_scalers;
6410 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6411 if (!HAS_GMCH(dev_priv))
6412 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6414 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6416 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6421 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6423 intel_dp->panel_power_off_time = ktime_get_boottime();
6424 intel_dp->last_power_on = jiffies;
6425 intel_dp->last_backlight_off = jiffies;
6429 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6431 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6432 u32 pp_on, pp_off, pp_ctl;
6433 struct pps_registers regs;
6435 intel_pps_get_registers(intel_dp, &regs);
6437 pp_ctl = ironlake_get_pp_control(intel_dp);
6439 /* Ensure PPS is unlocked */
6440 if (!HAS_DDI(dev_priv))
6441 I915_WRITE(regs.pp_ctrl, pp_ctl);
6443 pp_on = I915_READ(regs.pp_on);
6444 pp_off = I915_READ(regs.pp_off);
6446 /* Pull timing values out of registers */
6447 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6448 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6449 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6450 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
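/*
 * The power cycle delay is stored by the hw in 100ms units, while
 * all other sw-side delays are kept in 100usec units; the * 1000
 * in both branches below performs that conversion.
 */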
6452 if (i915_mmio_reg_valid(regs.pp_div)) {
6453 u32 pp_div;
6455 pp_div = I915_READ(regs.pp_div);
6457 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6458 } else {
6459 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6464 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6466 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6467 state_name,
6468 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6472 intel_pps_verify_state(struct intel_dp *intel_dp)
6474 struct edp_power_seq hw;
6475 struct edp_power_seq *sw = &intel_dp->pps_delays;
6477 intel_pps_readout_hw_state(intel_dp, &hw);
6479 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6480 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6481 DRM_ERROR("PPS state mismatch\n");
6482 intel_pps_dump_state("sw", sw);
6483 intel_pps_dump_state("hw", &hw);
6488 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6490 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6491 struct edp_power_seq cur, vbt, spec,
6492 *final = &intel_dp->pps_delays;
6494 lockdep_assert_held(&dev_priv->pps_mutex);
6496 /* already initialized? */
6497 if (final->t11_t12 != 0)
6498 return;
6500 intel_pps_readout_hw_state(intel_dp, &cur);
6502 intel_pps_dump_state("cur", &cur);
6504 vbt = dev_priv->vbt.edp.pps;
6505 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
6506 * of 500ms appears to be too short. Occasionally the panel
6507 * just fails to power back on. Increasing the delay to 1300ms
6508 * seems sufficient to avoid this problem.
6510 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6511 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6512 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
6515 /* T11_T12 delay is special and actually in units of 100ms, but zero
6516 * based in the hw (so we need to add 100 ms). But the sw vbt
6517 * table multiplies it by 1000 to put it in units of 100usec,
6519 vbt.t11_t12 += 100 * 10;
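/*
 * Worked example with illustrative values: a 500ms VBT T11_T12
 * arrives here as 5000 (100usec units); the 100 * 10 added above
 * is the extra 100ms implied by the hw's zero-based encoding,
 * giving 6000 units, i.e. 600ms total.
 */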
6521 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6522 * our hw here, which are all in 100usec. */
6523 spec.t1_t3 = 210 * 10;
6524 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6525 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6526 spec.t10 = 500 * 10;
6527 /* This one is special and actually in units of 100ms, but zero
6528 * based in the hw (so we need to add 100 ms). But the sw vbt
6529 * table multiplies it by 1000 to put it in units of 100usec,
6531 spec.t11_t12 = (510 + 100) * 10;
6533 intel_pps_dump_state("vbt", &vbt);
6535 /* Use the max of the register settings and vbt. If both are
6536 * unset, fall back to the spec limits. */
6537 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
6538 spec.field : \
6539 max(cur.field, vbt.field))
6540 assign_final(t1_t3);
6541 assign_final(t8);
6542 assign_final(t9);
6543 assign_final(t10);
6544 assign_final(t11_t12);
6547 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
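/* get_delay() converts the 100usec units to ms, rounding up: e.g. 2100 -> 210 ms */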
6548 intel_dp->panel_power_up_delay = get_delay(t1_t3);
6549 intel_dp->backlight_on_delay = get_delay(t8);
6550 intel_dp->backlight_off_delay = get_delay(t9);
6551 intel_dp->panel_power_down_delay = get_delay(t10);
6552 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
6555 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
6556 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
6557 intel_dp->panel_power_cycle_delay);
6559 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
6560 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
6563 * We override the HW backlight delays to 1 because we do manual waits
6564 * on them. For T8, even BSpec recommends doing it. For T9, if we
6565 * don't do this, we'll end up waiting for the backlight off delay
6566 * twice: once when we do the manual sleep, and once when we disable
6567 * the panel and wait for the PP_STATUS bit to become zero.
6573 * HW has only a 100msec granularity for t11_t12, so round it up
6576 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
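/* e.g. a t11_t12 of 6001 (600.1ms) rounds up to 7000, i.e. 700ms */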
6580 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
6581 bool force_disable_vdd)
6583 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6584 u32 pp_on, pp_off, port_sel = 0;
6585 int div = dev_priv->rawclk_freq / 1000;
6586 struct pps_registers regs;
6587 enum port port = dp_to_dig_port(intel_dp)->base.port;
6588 const struct edp_power_seq *seq = &intel_dp->pps_delays;
6590 lockdep_assert_held(&dev_priv->pps_mutex);
6592 intel_pps_get_registers(intel_dp, &regs);
6595 * On some VLV machines the BIOS can leave the VDD
6596 * enabled even on power sequencers which aren't
6597 * hooked up to any port. This would mess up the
6598 * power domain tracking the first time we pick
6599 * one of these power sequencers for use since
6600 * edp_panel_vdd_on() would notice that the VDD was
6601 * already on and therefore wouldn't grab the power
6602 * domain reference. Disable VDD first to avoid this.
6603 * This also avoids spuriously turning the VDD on as
6604 * soon as the new power sequencer gets initialized.
6606 if (force_disable_vdd) {
6607 u32 pp = ironlake_get_pp_control(intel_dp);
6609 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
6611 if (pp & EDP_FORCE_VDD)
6612 DRM_DEBUG_KMS("VDD already on, disabling first\n");
6614 pp &= ~EDP_FORCE_VDD;
6616 I915_WRITE(regs.pp_ctrl, pp);
6617 }
6619 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
6620 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
6621 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
6622 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
6624 /* Haswell doesn't have any port selection bits for the panel
6625 * power sequencer any more. */
6626 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6627 port_sel = PANEL_PORT_SELECT_VLV(port);
6628 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
6629 switch (port) {
6630 case PORT_A:
6631 port_sel = PANEL_PORT_SELECT_DPA;
6632 break;
6633 case PORT_C:
6634 port_sel = PANEL_PORT_SELECT_DPC;
6635 break;
6636 case PORT_D:
6637 port_sel = PANEL_PORT_SELECT_DPD;
6638 break;
6639 default:
6640 MISSING_CASE(port);
6641 break;
6642 }
6643 }
6645 pp_on |= port_sel;
6647 I915_WRITE(regs.pp_on, pp_on);
6648 I915_WRITE(regs.pp_off, pp_off);
6651 * Compute the divisor for the pp clock; simply match the Bspec formula.
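* As a worked example (our illustrative numbers, assuming a 24 MHz
* rawclk, i.e. rawclk_freq = 24000 kHz and thus div = 24): the
* reference divider field becomes (100 * 24) / 2 - 1 = 1199, and a
* t11_t12 of 6000 (100usec units) is written as
* DIV_ROUND_UP(6000, 1000) = 6 in the 100ms-granularity power cycle field.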
6653 if (i915_mmio_reg_valid(regs.pp_div)) {
6654 I915_WRITE(regs.pp_div,
6655 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
6656 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
6657 } else {
6658 u32 pp_ctl;
6660 pp_ctl = I915_READ(regs.pp_ctrl);
6661 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
6662 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
6663 I915_WRITE(regs.pp_ctrl, pp_ctl);
6666 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6667 I915_READ(regs.pp_on),
6668 I915_READ(regs.pp_off),
6669 i915_mmio_reg_valid(regs.pp_div) ?
6670 I915_READ(regs.pp_div) :
6671 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
6674 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6676 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6678 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6679 vlv_initial_power_sequencer_setup(intel_dp);
6680 } else {
6681 intel_dp_init_panel_power_sequencer(intel_dp);
6682 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6687 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6688 * @dev_priv: i915 device
6689 * @crtc_state: a pointer to the active intel_crtc_state
6690 * @refresh_rate: RR to be programmed
6692 * This function gets called when refresh rate (RR) has to be changed from
6693 * one frequency to another. Switches can be between high and low RR
6694 * supported by the panel or to any other RR based on media playback (in
6695 * this case, RR value needs to be passed from user space).
6697 * The caller of this function needs to hold dev_priv->drrs.mutex.
6699 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6700 const struct intel_crtc_state *crtc_state,
6703 struct intel_dp *intel_dp = dev_priv->drrs.dp;
6704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6705 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6707 if (refresh_rate <= 0) {
6708 DRM_DEBUG_KMS("Refresh rate should be positive.\n");
6709 return;
6710 }
6712 if (!intel_dp) {
6713 DRM_DEBUG_KMS("DRRS not supported.\n");
6714 return;
6715 }
6717 if (!intel_crtc) {
6718 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6719 return;
6720 }
6722 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6723 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6724 return;
6725 }
6727 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6728 refresh_rate)
6729 index = DRRS_LOW_RR;
6731 if (index == dev_priv->drrs.refresh_rate_type) {
6732 DRM_DEBUG_KMS(
6733 "DRRS requested for previously set RR...ignoring\n");
6734 return;
6735 }
6737 if (!crtc_state->base.active) {
6738 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
6739 return;
6740 }
6742 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6743 switch (index) {
6744 case DRRS_HIGH_RR:
6745 intel_dp_set_m_n(crtc_state, M1_N1);
6746 break;
6747 case DRRS_LOW_RR:
6748 intel_dp_set_m_n(crtc_state, M2_N2);
6749 break;
6750 case DRRS_MAX_RR:
6751 default:
6752 DRM_ERROR("Unsupported refresh rate type\n");
6753 }
6754 } else if (INTEL_GEN(dev_priv) > 6) {
6755 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6756 u32 val;
6758 val = I915_READ(reg);
6759 if (index > DRRS_HIGH_RR) {
6760 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6761 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6762 else
6763 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6764 } else {
6765 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6766 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6767 else
6768 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6769 }
6770 I915_WRITE(reg, val);
6773 dev_priv->drrs.refresh_rate_type = index;
6775 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6779 * intel_edp_drrs_enable - init drrs struct if supported
6780 * @intel_dp: DP struct
6781 * @crtc_state: A pointer to the active crtc state.
6783 * Initializes frontbuffer_bits and drrs.dp
6785 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6786 const struct intel_crtc_state *crtc_state)
6788 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6790 if (!crtc_state->has_drrs) {
6791 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6795 if (dev_priv->psr.enabled) {
6796 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6800 mutex_lock(&dev_priv->drrs.mutex);
6801 if (dev_priv->drrs.dp) {
6802 DRM_DEBUG_KMS("DRRS already enabled\n");
6806 dev_priv->drrs.busy_frontbuffer_bits = 0;
6808 dev_priv->drrs.dp = intel_dp;
6810 unlock:
6811 mutex_unlock(&dev_priv->drrs.mutex);
6815 * intel_edp_drrs_disable - Disable DRRS
6816 * @intel_dp: DP struct
6817 * @old_crtc_state: Pointer to old crtc_state.
6820 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6821 const struct intel_crtc_state *old_crtc_state)
6823 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6825 if (!old_crtc_state->has_drrs)
6826 return;
6828 mutex_lock(&dev_priv->drrs.mutex);
6829 if (!dev_priv->drrs.dp) {
6830 mutex_unlock(&dev_priv->drrs.mutex);
6831 return;
6832 }
6834 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6835 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6836 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6838 dev_priv->drrs.dp = NULL;
6839 mutex_unlock(&dev_priv->drrs.mutex);
6841 cancel_delayed_work_sync(&dev_priv->drrs.work);
6844 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6846 struct drm_i915_private *dev_priv =
6847 container_of(work, typeof(*dev_priv), drrs.work.work);
6848 struct intel_dp *intel_dp;
6850 mutex_lock(&dev_priv->drrs.mutex);
6852 intel_dp = dev_priv->drrs.dp;
6854 if (!intel_dp)
6855 goto unlock;
6858 * The delayed work can race with an invalidate, hence we need to
6859 * recheck.
6862 if (dev_priv->drrs.busy_frontbuffer_bits)
6863 goto unlock;
6865 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6866 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6868 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6869 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
6872 unlock:
6873 mutex_unlock(&dev_priv->drrs.mutex);
6877 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6878 * @dev_priv: i915 device
6879 * @frontbuffer_bits: frontbuffer plane tracking bits
6881 * This function gets called every time rendering on the given planes starts.
6882 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
6884 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6886 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6887 unsigned int frontbuffer_bits)
6889 struct drm_crtc *crtc;
6890 enum pipe pipe;
6892 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6893 return;
6895 cancel_delayed_work(&dev_priv->drrs.work);
6897 mutex_lock(&dev_priv->drrs.mutex);
6898 if (!dev_priv->drrs.dp) {
6899 mutex_unlock(&dev_priv->drrs.mutex);
6900 return;
6901 }
6903 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6904 pipe = to_intel_crtc(crtc)->pipe;
6906 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6907 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6909 /* invalidate means busy screen, hence upclock */
6910 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6911 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6912 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6914 mutex_unlock(&dev_priv->drrs.mutex);
6918 * intel_edp_drrs_flush - Restart Idleness DRRS
6919 * @dev_priv: i915 device
6920 * @frontbuffer_bits: frontbuffer plane tracking bits
6922 * This function gets called every time rendering on the given planes has
6923 * completed or a flip on a crtc is completed. So DRRS should be upclocked
6924 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
6925 * if no other planes are dirty.
6927 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6929 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6930 unsigned int frontbuffer_bits)
6932 struct drm_crtc *crtc;
6933 enum pipe pipe;
6935 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6936 return;
6938 cancel_delayed_work(&dev_priv->drrs.work);
6940 mutex_lock(&dev_priv->drrs.mutex);
6941 if (!dev_priv->drrs.dp) {
6942 mutex_unlock(&dev_priv->drrs.mutex);
6943 return;
6944 }
6946 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6947 pipe = to_intel_crtc(crtc)->pipe;
6949 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6950 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6952 /* flush means busy screen, hence upclock */
6953 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6954 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6955 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6958 * flush also means no more activity, hence schedule the downclock
6959 * if all other fbs are quiescent too
6961 if (!dev_priv->drrs.busy_frontbuffer_bits)
6962 schedule_delayed_work(&dev_priv->drrs.work,
6963 msecs_to_jiffies(1000));
6964 mutex_unlock(&dev_priv->drrs.mutex);
6968 * DOC: Display Refresh Rate Switching (DRRS)
6970 * Display Refresh Rate Switching (DRRS) is a power conservation feature
6971 * which enables switching between low and high refresh rates,
6972 * dynamically, based on the usage scenario. This feature is applicable
6973 * for internal panels.
6975 * Indication that the panel supports DRRS is given by the panel EDID, which
6976 * would list multiple refresh rates for one resolution.
6978 * DRRS is of two types: static and seamless.
6979 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6980 * (may appear as a blink on screen) and is used in dock/undock scenarios.
6981 * Seamless DRRS involves changing RR without any visual effect to the user
6982 * and can be used during normal system usage. This is done by programming
6983 * certain registers.
6985 * Support for static/seamless DRRS may be indicated in the VBT based on
6986 * inputs from the panel spec.
6988 * DRRS saves power by switching to low RR based on usage scenarios.
6990 * The implementation is based on frontbuffer tracking. When
6991 * there is a disturbance on the screen triggered by user activity or a periodic
6992 * system activity, DRRS is disabled (RR is changed to high RR). When there is
6993 * no movement on screen, after a timeout of 1 second, a switch to low RR is
6994 * made.
6996 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6997 * and intel_edp_drrs_flush() are called.
6999 * DRRS can be further extended to support other internal panels and also
7000 * the scenario of video playback wherein RR is set based on the rate
7001 * requested by userspace.
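*
* A minimal sketch of the expected call flow, assuming a hypothetical
* frontbuffer tracking call site (illustrative only, not a verbatim
* copy of the real hooks):
*
*	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
*	  (rendering dirties the planes: upclock immediately)
*	... render/flip activity on the tracked planes ...
*	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
*	  (activity done: re-arm the 1 second idleness timer that
*	   downclocks again once all planes are quiescent)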
7005 * intel_dp_drrs_init - Init basic DRRS work and mutex.
7006 * @connector: eDP connector
7007 * @fixed_mode: preferred mode of panel
7009 * This function is called only once at driver load to initialize basic
7010 * DRRS stuff.
7012 * Returns:
7013 * Downclock mode if panel supports it, else return NULL.
7014 * DRRS support is determined by the presence of downclock mode (apart
7015 * from VBT setting).
7017 static struct drm_display_mode *
7018 intel_dp_drrs_init(struct intel_connector *connector,
7019 struct drm_display_mode *fixed_mode)
7021 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7022 struct drm_display_mode *downclock_mode = NULL;
7024 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7025 mutex_init(&dev_priv->drrs.mutex);
7027 if (INTEL_GEN(dev_priv) <= 6) {
7028 DRM_DEBUG_KMS("DRRS supported only for Gen7 and above\n");
7029 return NULL;
7030 }
7032 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7033 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
7034 return NULL;
7035 }
7037 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7038 if (!downclock_mode) {
7039 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
7040 return NULL;
7041 }
7043 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7045 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7046 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
7047 return downclock_mode;
7050 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7051 struct intel_connector *intel_connector)
7053 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7054 struct drm_device *dev = &dev_priv->drm;
7055 struct drm_connector *connector = &intel_connector->base;
7056 struct drm_display_mode *fixed_mode = NULL;
7057 struct drm_display_mode *downclock_mode = NULL;
7058 bool has_dpcd;
7059 enum pipe pipe = INVALID_PIPE;
7060 intel_wakeref_t wakeref;
7061 struct edid *edid;
7063 if (!intel_dp_is_edp(intel_dp))
7064 return true;
7066 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7069 * On IBX/CPT we may get here with LVDS already registered. Since the
7070 * driver uses the only internal power sequencer available for both
7071 * eDP and LVDS, bail out early in this case to prevent interfering
7072 * with an already powered-on LVDS power sequencer.
7074 if (intel_get_lvds_encoder(dev_priv)) {
7075 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7076 DRM_INFO("LVDS was detected, not registering eDP\n");
7081 with_pps_lock(intel_dp, wakeref) {
7082 intel_dp_init_panel_power_timestamps(intel_dp);
7083 intel_dp_pps_init(intel_dp);
7084 intel_edp_panel_vdd_sanitize(intel_dp);
7087 /* Cache DPCD and EDID for edp. */
7088 has_dpcd = intel_edp_init_dpcd(intel_dp);
7090 if (!has_dpcd) {
7091 /* if this fails, presume the device is a ghost */
7092 DRM_INFO("failed to retrieve link info, disabling eDP\n");
7096 mutex_lock(&dev->mode_config.mutex);
7097 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7098 if (edid) {
7099 if (drm_add_edid_modes(connector, edid)) {
7100 drm_connector_update_edid_property(connector,
7101 edid);
7102 } else {
7103 kfree(edid);
7104 edid = ERR_PTR(-EINVAL);
7105 }
7106 } else {
7107 edid = ERR_PTR(-ENOENT);
7108 }
7109 intel_connector->edid = edid;
7111 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7112 if (fixed_mode)
7113 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7115 /* fall back to VBT if available for eDP */
7116 if (!fixed_mode)
7117 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7118 mutex_unlock(&dev->mode_config.mutex);
7120 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7121 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7122 register_reboot_notifier(&intel_dp->edp_notifier);
7125 * Figure out the current pipe for the initial backlight setup.
7126 * If the current pipe isn't valid, try the PPS pipe, and if that
7127 * fails just assume pipe A.
7129 pipe = vlv_active_pipe(intel_dp);
7131 if (pipe != PIPE_A && pipe != PIPE_B)
7132 pipe = intel_dp->pps_pipe;
7134 if (pipe != PIPE_A && pipe != PIPE_B)
7135 pipe = PIPE_A;
7137 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
7138 pipe_name(pipe));
7141 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7142 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7143 intel_panel_setup_backlight(connector, pipe);
7145 if (fixed_mode)
7146 drm_connector_init_panel_orientation_property(
7147 connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
7149 return true;
7151 out_vdd_off:
7152 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7154 * vdd might still be enabled due to the delayed vdd off.
7155 * Make sure vdd is actually turned off here.
7157 with_pps_lock(intel_dp, wakeref)
7158 edp_panel_vdd_off_sync(intel_dp);
7160 return false;
7161 }
7163 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7165 struct intel_connector *intel_connector;
7166 struct drm_connector *connector;
7168 intel_connector = container_of(work, typeof(*intel_connector),
7169 modeset_retry_work);
7170 connector = &intel_connector->base;
7171 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7174 /* Grab the locks before changing the connector property */
7175 mutex_lock(&connector->dev->mode_config.mutex);
7176 /* Set the connector link status to BAD and send a uevent to notify
7177 * userspace to do a modeset.
7179 drm_connector_set_link_status_property(connector,
7180 DRM_MODE_LINK_STATUS_BAD);
7181 mutex_unlock(&connector->dev->mode_config.mutex);
7182 /* Send Hotplug uevent so userspace can reprobe */
7183 drm_kms_helper_hotplug_event(connector->dev);
7187 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7188 struct intel_connector *intel_connector)
7190 struct drm_connector *connector = &intel_connector->base;
7191 struct intel_dp *intel_dp = &intel_dig_port->dp;
7192 struct intel_encoder *intel_encoder = &intel_dig_port->base;
7193 struct drm_device *dev = intel_encoder->base.dev;
7194 struct drm_i915_private *dev_priv = to_i915(dev);
7195 enum port port = intel_encoder->port;
7196 enum phy phy = intel_port_to_phy(dev_priv, port);
7197 int type;
7199 /* Initialize the work for modeset in case of link train failure */
7200 INIT_WORK(&intel_connector->modeset_retry_work,
7201 intel_dp_modeset_retry_work_fn);
7203 if (WARN(intel_dig_port->max_lanes < 1,
7204 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
7205 intel_dig_port->max_lanes, intel_encoder->base.base.id,
7206 intel_encoder->base.name))
7207 return false;
7209 intel_dp_set_source_rates(intel_dp);
7211 intel_dp->reset_link_params = true;
7212 intel_dp->pps_pipe = INVALID_PIPE;
7213 intel_dp->active_pipe = INVALID_PIPE;
7215 /* Preserve the current hw state. */
7216 intel_dp->DP = I915_READ(intel_dp->output_reg);
7217 intel_dp->attached_connector = intel_connector;
7219 if (intel_dp_is_port_edp(dev_priv, port)) {
7221 * Currently we don't support eDP on TypeC ports, although in
7222 * theory it could work on TypeC legacy ports.
7224 WARN_ON(intel_phy_is_tc(dev_priv, phy));
7225 type = DRM_MODE_CONNECTOR_eDP;
7226 } else {
7227 type = DRM_MODE_CONNECTOR_DisplayPort;
7228 }
7230 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7231 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7234 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7235 * for DP the encoder type can be set by the caller to
7236 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7238 if (type == DRM_MODE_CONNECTOR_eDP)
7239 intel_encoder->type = INTEL_OUTPUT_EDP;
7241 /* eDP only on port B and/or C on vlv/chv */
7242 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
7243 intel_dp_is_edp(intel_dp) &&
7244 port != PORT_B && port != PORT_C))
7245 return false;
7247 DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
7248 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7249 intel_encoder->base.base.id, intel_encoder->base.name);
7251 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7252 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7254 if (!HAS_GMCH(dev_priv))
7255 connector->interlace_allowed = true;
7256 connector->doublescan_allowed = false;
7258 if (INTEL_GEN(dev_priv) >= 11)
7259 connector->ycbcr_420_allowed = true;
7261 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7263 intel_dp_aux_init(intel_dp);
7265 intel_connector_attach_encoder(intel_connector, intel_encoder);
7267 if (HAS_DDI(dev_priv))
7268 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7270 intel_connector->get_hw_state = intel_connector_get_hw_state;
7272 /* init MST on ports that can support it */
7273 if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
7274 (port == PORT_B || port == PORT_C ||
7275 port == PORT_D || port == PORT_F))
7276 intel_dp_mst_encoder_init(intel_dig_port,
7277 intel_connector->base.base.id);
7279 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7280 intel_dp_aux_fini(intel_dp);
7281 intel_dp_mst_encoder_cleanup(intel_dig_port);
7282 goto fail;
7283 }
7285 intel_dp_add_properties(intel_dp, connector);
7287 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7288 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7290 DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
7293 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
7294 * written with 0xd. Failure to do so will result in spurious interrupts
7295 * being generated on the port when a cable is not attached.
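* The read-modify-write below preserves bits 31:4 and forces bits
* 3:0 to 0xd.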
7297 if (IS_G45(dev_priv)) {
7298 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
7299 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
7302 return true;
7304 fail:
7305 drm_connector_cleanup(connector);
7307 return false;
7308 }
7310 bool intel_dp_init(struct drm_i915_private *dev_priv,
7311 i915_reg_t output_reg,
7312 enum port port)
7314 struct intel_digital_port *intel_dig_port;
7315 struct intel_encoder *intel_encoder;
7316 struct drm_encoder *encoder;
7317 struct intel_connector *intel_connector;
7319 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7320 if (!intel_dig_port)
7321 return false;
7323 intel_connector = intel_connector_alloc();
7324 if (!intel_connector)
7325 goto err_connector_alloc;
7327 intel_encoder = &intel_dig_port->base;
7328 encoder = &intel_encoder->base;
7330 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7331 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7332 "DP %c", port_name(port)))
7333 goto err_encoder_init;
7335 intel_encoder->hotplug = intel_dp_hotplug;
7336 intel_encoder->compute_config = intel_dp_compute_config;
7337 intel_encoder->get_hw_state = intel_dp_get_hw_state;
7338 intel_encoder->get_config = intel_dp_get_config;
7339 intel_encoder->update_pipe = intel_panel_update_backlight;
7340 intel_encoder->suspend = intel_dp_encoder_suspend;
7341 if (IS_CHERRYVIEW(dev_priv)) {
7342 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7343 intel_encoder->pre_enable = chv_pre_enable_dp;
7344 intel_encoder->enable = vlv_enable_dp;
7345 intel_encoder->disable = vlv_disable_dp;
7346 intel_encoder->post_disable = chv_post_disable_dp;
7347 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7348 } else if (IS_VALLEYVIEW(dev_priv)) {
7349 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7350 intel_encoder->pre_enable = vlv_pre_enable_dp;
7351 intel_encoder->enable = vlv_enable_dp;
7352 intel_encoder->disable = vlv_disable_dp;
7353 intel_encoder->post_disable = vlv_post_disable_dp;
7354 } else {
7355 intel_encoder->pre_enable = g4x_pre_enable_dp;
7356 intel_encoder->enable = g4x_enable_dp;
7357 intel_encoder->disable = g4x_disable_dp;
7358 intel_encoder->post_disable = g4x_post_disable_dp;
7359 }
7361 intel_dig_port->dp.output_reg = output_reg;
7362 intel_dig_port->max_lanes = 4;
7364 intel_encoder->type = INTEL_OUTPUT_DP;
7365 intel_encoder->power_domain = intel_port_to_power_domain(port);
7366 if (IS_CHERRYVIEW(dev_priv)) {
7367 if (port == PORT_D)
7368 intel_encoder->crtc_mask = BIT(PIPE_C);
7369 else
7370 intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B);
7371 } else {
7372 intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
7373 }
7374 intel_encoder->cloneable = 0;
7375 intel_encoder->port = port;
7377 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7380 intel_infoframe_init(intel_dig_port);
7382 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7383 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7384 goto err_init_connector;
7386 return true;
7388 err_init_connector:
7389 drm_encoder_cleanup(encoder);
7390 err_encoder_init:
7391 kfree(intel_connector);
7392 err_connector_alloc:
7393 kfree(intel_dig_port);
7395 return false;
7396 }
7397 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7399 struct intel_encoder *encoder;
7401 for_each_intel_encoder(&dev_priv->drm, encoder) {
7402 struct intel_dp *intel_dp;
7404 if (encoder->type != INTEL_OUTPUT_DDI)
7405 continue;
7407 intel_dp = enc_to_intel_dp(&encoder->base);
7409 if (!intel_dp->can_mst)
7410 continue;
7412 if (intel_dp->is_mst)
7413 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7417 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7419 struct intel_encoder *encoder;
7421 for_each_intel_encoder(&dev_priv->drm, encoder) {
7422 struct intel_dp *intel_dp;
7423 int ret;
7425 if (encoder->type != INTEL_OUTPUT_DDI)
7426 continue;
7428 intel_dp = enc_to_intel_dp(&encoder->base);
7430 if (!intel_dp->can_mst)
7431 continue;
7433 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
7434 if (ret) {
7435 intel_dp->is_mst = false;
7436 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,